/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	const char *type;
	unsigned way_size_0;
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * By default, we write directly to secure registers.  Platforms must
 * override this if they are running non-secure.
 */
static void l2c_write_sec(unsigned long val, void __iomem *base, unsigned reg)
{
	if (val == readl_relaxed(base + reg))
		return;
	if (outer_cache.write_sec)
		outer_cache.write_sec(val, reg);
	else
		writel_relaxed(val, base + reg);
}
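
/*
 * Illustrative sketch (not taken from this file): a platform booting
 * non-secure would typically point outer_cache.write_sec at a helper
 * that traps to its secure firmware, e.g.
 *
 *	static void foo_l2c_write_sec(unsigned long val, unsigned reg)
 *	{
 *		foo_firmware_call(FOO_SMC_L2C_WRITE, reg, val);
 *	}
 *
 * where the foo_* names and the SMC encoding are hypothetical and
 * entirely platform-specific.
 */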
75
76/*
Russell King2b2a87a2014-03-16 17:19:21 +000077 * This should only be called when we have a requirement that the
78 * register be written due to a work-around, as platforms running
79 * in non-secure mode may not be able to access this register.
80 */
81static inline void l2c_set_debug(void __iomem *base, unsigned long val)
82{
Russell King8abd2592014-03-16 17:38:08 +000083 if (outer_cache.set_debug)
84 outer_cache.set_debug(val);
85 else
86 l2c_write_sec(val, base, L2X0_DEBUG_CTRL);
Russell King2b2a87a2014-03-16 17:19:21 +000087}
88
Russell Kingdf5dd4c2014-03-15 16:47:56 +000089static void __l2c_op_way(void __iomem *reg)
90{
91 writel_relaxed(l2x0_way_mask, reg);
Russell King83841fe2014-03-15 16:48:14 +000092 l2c_wait_mask(reg, l2x0_way_mask);
Russell Kingdf5dd4c2014-03-15 16:47:56 +000093}
94
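/*
 * Clear the data and instruction lockdown registers for each master
 * interface so that all ways are usable; the D/I register pairs for
 * successive masters are spaced L2X0_LOCKDOWN_STRIDE apart.
 */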
static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	l2c_write_sec(aux, base, L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	l2c_write_sec(0, base, L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug || outer_cache.write_sec)
		l2c_set_debug(l2x0_base, val);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	l2c_write_sec(0, l2x0_base, L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (unspecified whether this causes an error.)  Thankfully, not
 * used on SMP.
 *
 * Never has a sync register other than L2X0_CACHE_SYNC, but we use
 * sync_reg_offset here so we can share some of this with L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

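/*
 * Issue a line operation for every cache line in [start, end).  On the
 * L2C-210/310 these per-line writes complete atomically, so there is no
 * need to poll between lines (see the notes above).
 */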
static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-220 specific code.
 *
 * All operations are background operations: they have to be waited for.
 * Conflicting requests generate a slave error (which will cause an
 * imprecise abort.)  Never uses sync_reg_offset, so we hard-code the
 * sync register here.
 *
 * However, we can re-use the l2c210_resume call.
 */
static inline void __l2c220_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
	l2c_wait_mask(base + L2X0_CACHE_SYNC, 1);
}

static void l2c220_op_way(void __iomem *base, unsigned reg)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(base + reg);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

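/*
 * Walk a PA range one cache line at a time, polling for each background
 * line operation to complete.  The l2x0 lock is dropped and re-taken at
 * 4K boundaries to bound IRQ-disabled latency; the caller passes in and
 * gets back the updated IRQ flags for this reason.
 */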
static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end, unsigned long flags)
{
	raw_spinlock_t *lock = &l2x0_lock;

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2c_wait_mask(reg, 1);
			writel_relaxed(start, reg);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}

	return flags;
}

static void l2c220_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
			writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
		}
	}

	flags = l2c220_op_pa_range(base + L2X0_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	start &= ~(CACHE_LINE_SIZE - 1);
	if ((end - start) >= l2x0_size) {
		l2c220_op_way(base, L2X0_CLEAN_INV_WAY);
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	flags = l2c220_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA,
				   start, end, flags);
	l2c_wait_mask(base + L2X0_CLEAN_INV_LINE_PA, 1);
	__l2c220_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2c220_flush_all(void)
{
	l2c220_op_way(l2x0_base, L2X0_CLEAN_INV_WAY);
}

static void l2c220_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c220_cache_sync(l2x0_base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static const struct l2c_init_data l2c220_data = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Very similar to L2C-210, the PA, set/way and sync operations are atomic,
 * and the way operations are all background tasks.  However, issuing an
 * operation while a background operation is in progress results in a
 * SLVERR response.  We can reuse:
 *
 *	__l2c210_cache_sync (using sync_reg_offset)
 *	l2c210_sync
 *	l2c210_inv_range (if 588369 is not applicable)
 *	l2c210_clean_range
 *	l2c210_flush_range (if 588369 is not applicable)
 *	l2c210_flush_all (if 727915 is not applicable)
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}

static void l2c310_inv_range_erratum(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if ((start | end) & (CACHE_LINE_SIZE - 1)) {
		unsigned long flags;

		/* Erratum 588369 for both clean+invalidate operations */
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		l2c_set_debug(base, 0x03);

		if (start & (CACHE_LINE_SIZE - 1)) {
			start &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}

		if (end & (CACHE_LINE_SIZE - 1)) {
			end &= ~(CACHE_LINE_SIZE - 1);
			writel_relaxed(end, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(end, base + L2X0_INV_LINE_PA);
		}

		l2c_set_debug(base, 0x00);
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
{
	raw_spinlock_t *lock = &l2x0_lock;
	unsigned long flags;
	void __iomem *base = l2x0_base;

	raw_spin_lock_irqsave(lock, flags);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		l2c_set_debug(base, 0x03);
		while (start < blk_end) {
			writel_relaxed(start, base + L2X0_CLEAN_LINE_PA);
			writel_relaxed(start, base + L2X0_INV_LINE_PA);
			start += CACHE_LINE_SIZE;
		}
		l2c_set_debug(base, 0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(lock, flags);
			raw_spin_lock_irqsave(lock, flags);
		}
	}
	raw_spin_unlock_irqrestore(lock, flags);
	__l2c210_cache_sync(base);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L2X0_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			l2c_write_sec(l2x0_saved_regs.prefetch_ctrl, base,
				      L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			l2c_write_sec(l2x0_saved_regs.pwr_ctrl, base,
				      L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	/* For compatibility */
	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_588369) &&
	    revision < L310_CACHE_ID_RTL_R2P0 &&
	    /* For bcm compatibility */
	    fns->inv_range == l2c210_inv_range) {
		fns->inv_range = l2c310_inv_range_erratum;
		fns->flush_range = l2c310_flush_range_erratum;
		errata[n++] = "588369";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	unsigned way_size_bits, ways;
	u32 aux;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		break;

	case L2X0_CACHE_ID_PART_L210:
	case L2X0_CACHE_ID_PART_L220:
		ways = (aux >> 13) & 0xf;
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
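		/*
		 * Aurora encodes associativity differently, e.g. a field
		 * value of 7 yields 2 << ((7 + 1) >> 2) = 8 ways.
		 */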
		ways = 2 << ((ways + 1) >> 2);
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * way_size_0 is the size that a way_size value of zero would be
	 * given the calculation: way_size = way_size_0 << way_size_bits.
	 * So, if way_size_bits=0 is reserved, but way_size_bits=1 is 16k,
	 * then way_size_0 would be 8k.
	 *
	 * L2 cache size = number of ways * way size.
	 */
	way_size_bits = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	l2x0_size = ways * (data->way_size_0 << way_size_bits);
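	/*
	 * Worked example of the above: with way_size_0 = SZ_8K and a
	 * way_size field of 3, each way is 8K << 3 = 64K, so a 16-way
	 * L2C-310 gives a 1MB cache.
	 */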

	fns = data->outer_cache;
	fns.write_sec = outer_cache.write_sec;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);
	if (fns.write_sec)
		fns.set_debug = NULL;

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		data->type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		data->type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L220:
		data = &l2c220_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2c210_data __initconst = {
	.type = "L2C-210",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2c220_data __initconst = {
	.type = "L2C-220",
	.way_size_0 = SZ_8K,
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c220_inv_range,
		.clean_range = l2c220_clean_range,
		.flush_range = l2c220_flush_range,
		.flush_all = l2c220_flush_all,
		.disable = l2c_disable,
		.sync = l2c220_sync,
		.resume = l2c210_resume,
	},
};

static void __init l2c310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
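
/*
 * Illustrative DT fragment consumed by l2c310_of_parse() above (the
 * node name, address and latency values are made up; real ones come
 * from the SoC):
 *
 *	l2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2 2 2>;
 *		arm,data-latency = <3 3 3>;
 *		arm,filter-ranges = <0x80000000 0x20000000>;
 *	};
 */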

static const struct l2c_init_data of_l2c310_data __initconst = {
	.type = "L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.set_debug = l2c310_set_debug,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round start and end addresses to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.type = "Aurora",
	.way_size_0 = SZ_4K,
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush.
 *
 * Section	Address Range			Offset		EMI
 *   1		0x00000000 - 0x3FFFFFFF		0x80000000	VC
 *   2		0x40000000 - 0xBFFFFFFF		0x40000000	SYS
 *   3		0xC0000000 - 0xFFFFFFFF		0x80000000	VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate a range that starts at 0xBFFF0000 and ends
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2)
 * 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind the cross-section case is very
 * rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid
 * use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2.  Because
 * of that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
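
/*
 * Example: 0x50000000 lies in the SYS EMI section of the table above,
 * so the L2 is handed 0x50000000 + 0x40000000 = 0x90000000; anything
 * outside that window gets the VC EMI offset instead.
 */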

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		outer_cache.flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2c210_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2c210_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2c210_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

/* Broadcom L2C-310s are built from ARM's r3p2 or later, and require no fixups */
static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.type = "BCM-L2C-310",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.of_parse = l2c310_of_parse,
	.enable = l2c_enable,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L2X0_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.type = "Tauros3",
	.way_size_0 = SZ_8K,
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2c220_data),
	L2C_ID("arm,pl310-cache", of_l2c310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif