/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/*
 * The Aurora cache controller doesn't have the cache ID register
 * available, so we have to pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

static bool of_init = false;

/*
 * Common code for all cache controllers.
 */
static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

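/*
 * Illustrative: an L310 exposes eight D/I lockdown register pairs (see
 * l2x0_unlock() below), so l2c_unlock(base, 8) clears sixteen registers
 * in total, spaced L2X0_LOCKDOWN_STRIDE bytes apart, ensuring no ways
 * are locked down before the cache is (re-)enabled.
 */
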
#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

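/*
 * A note on the debug values used below (as described in ARM's PL310
 * errata notes): writing 0x03 sets the DCL and DWB bits of the debug
 * control register, temporarily disabling cache linefills and forcing
 * write-through behaviour while clean/invalidate-by-PA operations are
 * issued; writing 0x00 restores normal operation.  This is also why
 * the errata variant of l2x0_flush_line() below splits the atomic
 * clean+invalidate into a separate clean-by-PA then invalidate-by-PA.
 */
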
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

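/*
 * Worked example (illustrative): l2x0_inv_range(0x1004, 0x2008) with a
 * 32-byte line must not discard bytes outside the range that share its
 * first and last cache lines, so the partial lines at 0x1000 and 0x2000
 * are cleaned+invalidated (flushed) and only the fully covered lines in
 * between are invalidated.  Like the other range operations below, the
 * work is done in blocks of at most 4096 bytes, dropping and re-taking
 * l2x0_lock between blocks to bound interrupt latency.
 */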
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

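/*
 * Flush and disable the L2.  Note (interpretation): writel_relaxed()
 * carries no barrier, so the dsb(st) below ensures the write clearing
 * L2X0_CTRL has reached the controller before the lock is released.
 */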
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	l2c_unlock(l2x0_base, lockregs);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

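	/*
	 * Worked example (illustrative): an L310 with the AUX_CTRL
	 * way-size field (bits 19:17) equal to 3 has
	 * way_size = 1 << (3 + 3) = 64KB per way; with 16 ways that
	 * gives l2x0_size = 16 * 64 * SZ_1K = 1MB.
	 */
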
	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting in non-secure mode, accessing the
	 * registers below will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.disable = l2x0_disable;
	}

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}
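
/*
 * Typical non-DT usage (illustrative only; the base address and AUX
 * values here are hypothetical, not taken from any real platform):
 *
 *	l2x0_init(ioremap(0xfff12000, SZ_4K), 0x00400000, 0xc0000fff);
 *
 * aux_mask selects which hardware AUX_CTRL bits to keep, and aux_val
 * ORs in the bits the platform wants forced on.
 */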

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

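/*
 * Example (illustrative): with 4K pages, calc_range_end(0x1fe0, 0x5000)
 * returns at most PAGE_ALIGN(0x1fe1) = 0x2000, so the caller issues one
 * range operation for [0x1fe0, 0x2000) and continues from there; the
 * MAX_RANGE_SIZE clamp similarly caps how long the pipeline may stall.
 */
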
/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up
	 * to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses cross two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

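/*
 * Illustrative mapping (derived from the table above): a SYS EMI
 * address such as 0x48000000 becomes 0x88000000, while a section 3
 * VC address such as 0xC0001000 becomes 0x40001000, since adding
 * 0x80000000 wraps modulo 2^32 in the 32-bit unsigned long.
 */
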
static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

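/*
 * Illustrative device tree fragment consumed by l2x0_of_parse() above
 * (all property values are made up):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2>;
 *		arm,data-latency = <2 2>;
 *		arm,dirty-latency = <2>;
 *	};
 */
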
static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

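/*
 * Illustrative PL310 fragment (values made up): here the latency
 * properties take three cells (read, write, setup) and
 * arm,filter-ranges gives <start length> for the address filter:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2 2 1>;
 *		arm,data-latency = <3 3 1>;
 *		arm,filter-ranges = <0x80000000 0x40000000>;
 *	};
 */
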
static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is a prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is a power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void __init tauros3_save(void)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;
	/* Enable Broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_tauros3_data __initconst = {
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

		/*
		 * For the Aurora cache in no-outer mode, select the
		 * correct mode using the coprocessor.
		 */
		if (data == &of_aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
	l2x0_init(l2x0_base, aux_val, aux_mask);

	return 0;
}
#endif