/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

static bool of_init = false;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif
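
/*
 * The value 0x03 passed to debug_writel() around the errata work-arounds
 * below sets the DCL and DWB bits of the PL310 debug register,
 * temporarily disabling linefills and forcing write-through behaviour
 * (an assumption based on the PL310 TRM; not stated in this file).
 */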

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
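		/*
		 * Aurora encodes the way count: e.g. a field value of 7
		 * decodes as 2 << ((7 + 1) >> 2) = 8 ways (illustrative
		 * arithmetic, derived from the expression below).
		 */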
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;
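
	/*
	 * Worked example (illustrative): assuming L2X0_WAY_SIZE_SHIFT is 3,
	 * a way-size field of 3 gives way_size = 1 << (3 + 3) = 64 (KB),
	 * so 8 such ways make l2x0_size 512KB.
	 */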

	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booting in non-secure mode, accessing the registers below
	 * will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.disable = l2x0_disable;
	}

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
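
/*
 * Illustrative: with 4K pages, calc_range_end(0x10000020, 0x10008000)
 * returns min(0x10000020 + MAX_RANGE_SIZE, 0x10001000), so the callers
 * below walk a long range in bounded, page-contained chunks.
 */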

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round start and end addresses to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush:
 *
 *	Section	Address Range			Offset		EMI
 *	1	0x00000000 - 0x3FFFFFFF		0x80000000	VC
 *	2	0x40000000 - 0xBFFFFFFF		0x40000000	SYS
 *	3	0xC0000000 - 0xFFFFFFFF		0x80000000	VC
 *
 * When the start and end addresses lie in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, an invalidate that starts at 0xBFFF0000 and ends at
 * 0xC0001000 must be split into 1) 0xBFFF0000 - 0xBFFFFFFF and
 * 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer
 * some performance hit, but keep in mind the cross-section case is very
 * rare.
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a
 * valid use case.
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because
 * of that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
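
/*
 * Illustrative: 0x50000000 lies in the SYS EMI section, so
 * bcm_l2_phys_addr(0x50000000) hands 0x90000000 to the L2; addresses
 * outside SYS EMI get the VC offset of 0x80000000 added instead.
 */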

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* The range crosses sections, so it can only be a crossing from
	 * section 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* The range crosses sections, so it can only be a crossing from
	 * section 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* The range crosses sections, so it can only be a crossing from
	 * section 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
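
/*
 * A minimal sketch of a matching device tree node (illustrative values;
 * see the binding documentation for the full set of properties):
 *
 *	l2-cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <2 2 2>;
 *		arm,data-latency = <3 3 3>;
 *	};
 */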

static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is the Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is the Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void __init tauros3_save(void)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;
	/* Enable broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_tauros3_data __initconst = {
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};
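
/*
 * Callers typically invoke l2x0_of_init() early from machine init code.
 * E.g. l2x0_of_init(0, ~0) accepts the DT/hardware-provided auxiliary
 * control settings unchanged (illustrative; values are platform-specific).
 */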

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

		/*
		 * For the aurora cache in no-outer mode, select the
		 * correct mode using the coprocessor.
		 */
		if (data == &of_aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
	l2x0_init(l2x0_base, aux_val, aux_mask);

	return 0;
}
#endif