/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}

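/*
 * Illustrative sketch (not part of this driver): a platform that runs
 * Linux non-secure can install an outer_cache.write_sec hook from its
 * early machine init so the writes above are routed through secure
 * firmware.  The firmware interface below is hypothetical:
 *
 *	static void example_write_sec(unsigned long val, unsigned reg)
 *	{
 *		example_firmware_l2c_write(reg, val);
 *	}
 *
 *	outer_cache.write_sec = example_write_sec;
 */
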
/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	l2c_write_sec(aux, base, L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c_save(void __iomem *base)
{
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a sync register other than L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}
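
/*
 * Worked example of the boundary handling above (illustrative): with
 * 32-byte lines, l2c210_inv_range(0x1010, 0x1070) first clean+invalidates
 * the partially covered lines at 0x1000 and 0x1060 (so unrelated dirty
 * data sharing those lines is not lost), then invalidates the fully
 * covered lines at 0x1020 and 0x1040.
 */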

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}
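
/*
 * Note on the blocking above (illustrative): a 16K range is issued as
 * four 4K blocks, and l2x0_lock is dropped and re-taken between blocks.
 * This bounds how long the lock is held (and hence interrupt latency)
 * while lines are written out one at a time.
 */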

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L220_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);
}

static const struct l2c_init_data l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 752271: PL310 R3P0->R3P1-50REL0, fixed R3P2.
 *	Affects: 8x64-bit (double fill) line fetches
 *	double fill line fetches can fail to cause dirty data to be evicted
 *	from the cache before the new data overwrites the second line.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2c_save(base);

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L310_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L310_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L310_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L310_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L310_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L310_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L310_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L310_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L310_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L310_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L310_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L310_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned rev = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_RTL_MASK;
	bool cortex_a9 = read_cpuid_part_number() == ARM_CPU_PART_CORTEX_A9;

	if (rev >= L310_CACHE_ID_RTL_R2P0) {
		if (cortex_a9) {
			aux |= L310_AUX_CTRL_EARLY_BRESP;
			pr_info("L2C-310 enabling early BRESP for Cortex-A9\n");
		} else if (aux & L310_AUX_CTRL_EARLY_BRESP) {
			pr_warn("L2C-310 early BRESP only supported with Cortex-A9\n");
			aux &= ~L310_AUX_CTRL_EARLY_BRESP;
		}
	}

	/* r3p0 or later has power control register */
	if (rev >= L310_CACHE_ID_RTL_R3P0) {
		u32 power_ctrl;

		l2c_write_sec(L310_DYNAMIC_CLK_GATING_EN | L310_STNDBY_MODE_EN,
			      base, L310_POWER_CTRL);
		power_ctrl = readl_relaxed(base + L310_POWER_CTRL);
		pr_info("L2C-310 dynamic clock gating %sabled, standby mode %sabled\n",
			power_ctrl & L310_DYNAMIC_CLK_GATING_EN ? "en" : "dis",
			power_ctrl & L310_STNDBY_MODE_EN ? "en" : "dis");
	}

	/*
	 * Always enable non-secure access to the lockdown registers -
	 * we write to them as part of the L2C enable sequence so they
	 * need to be accessible.
	 */
	aux |= L310_AUX_CTRL_NS_LOCKDOWN;

	l2c_enable(base, aux, num_lock);
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[8];
	unsigned n = 0;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (revision >= L310_CACHE_ID_RTL_R3P0 &&
	    revision < L310_CACHE_ID_RTL_R3P2) {
		u32 val = readl_relaxed(base + L310_PREFETCH_CTRL);
		/* I don't think bit23 is required here... but iMX6 does so */
		if (val & (BIT(30) | BIT(23))) {
			val &= ~(BIT(30) | BIT(23));
			l2c_write_sec(val, base, L310_PREFETCH_CTRL);
			errata[n++] = "752271";
		}
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux;

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2C_AUX_CTRL_WAY_SIZE_MASK) >>
			L2C_AUX_CTRL_WAY_SIZE_SHIFT;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);
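
	/*
	 * Worked example (illustrative): a 16-way PL310 whose way-size
	 * field reads 3 gives 8K << 3 = 64K per way, i.e. a 1MB cache.
	 */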

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	outer_cache = fns;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c220_enable,
	.save = l2c_save,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(tag[0] - 1) |
			L310_LATENCY_CTRL_WR(tag[1] - 1) |
			L310_LATENCY_CTRL_SETUP(tag[2] - 1),
			l2x0_base + L310_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			L310_LATENCY_CTRL_RD(data[0] - 1) |
			L310_LATENCY_CTRL_WR(data[1] - 1) |
			L310_LATENCY_CTRL_SETUP(data[2] - 1),
			l2x0_base + L310_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L310_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L310_ADDR_FILTER_EN,
			       l2x0_base + L310_ADDR_FILTER_START);
	}
}
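
/*
 * Illustrative device tree fragment consumed by l2c310_of_parse above
 * (the node name and addresses are made up; the properties follow the
 * standard arm,pl310-cache binding):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *		arm,filter-ranges = <0x80000000 0x40000000>;
 *	};
 *
 * The latency triplets are <read write setup> cycles, and the filter
 * property programs the address filter to cover the 1GB window starting
 * at 0x80000000.
 */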

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
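
/*
 * Worked example (illustrative, assuming 4K pages and the 1024-byte
 * MAX_RANGE_SIZE from cache-aurora-l2.h): calc_range_end(0x1f80, 0x5000)
 * first clamps end to 0x1f80 + 0x400 = 0x2380, then to the page boundary
 * PAGE_ALIGN(0x1f81) = 0x2000, so one hardware operation covers
 * 0x1f80-0x2000 and the caller loops for the remainder.
 */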

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round start down and end up to cache line boundaries
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section  Address Range              Offset        EMI
 *   1      0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2      0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3      0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate a range starting at 0xBFFF0000 and ending
 * at 0xC0001000, we must invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind that the cross-section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
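
/*
 * Worked example (illustrative): 0x40000000 lies in the SYS EMI section,
 * so the L2 is given 0x40000000 + 0x40000000 = 0x80000000; 0x20000000
 * falls outside it, so the VC offset applies and the L2 is given
 * 0x20000000 + 0x80000000 = 0xA0000000.
 */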

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310s are based on ARM's r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c310_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2c_save(base);

	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L310_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L310_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* All L2 caches are unified, so this property should be specified */
	if (!of_property_read_bool(np, "cache-unified"))
		pr_err("L2C: device tree omits to specify unified cache\n");

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
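
/*
 * Typical usage (illustrative): a DT platform calls this from its
 * machine init code, leaving the Auxiliary Control register value
 * chosen by the firmware or device tree untouched:
 *
 *	l2x0_of_init(0, ~0UL);
 */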
#endif