/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

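/*
 * Per-variant initialisation data: ->of_parse adjusts the auxiliary
 * control value/mask from device tree properties, ->save records the
 * registers needed across suspend/resume, and ->outer_cache holds the
 * outer_cache_fns that l2x0_of_init() installs for the controller.
 */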
struct l2c_init_data {
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

struct l2x0_regs l2x0_saved_regs;

static bool of_init = false;

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

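/*
 * PL310 errata 588369 (r0p0/r1p0): a Clean & Invalidate by PA can leave
 * clean lines valid.  The workaround below issues a Clean by PA followed
 * by a separate Invalidate by PA; callers wrap the operation in
 * debug_writel(0x03)/debug_writel(0x00) to disable linefills and
 * write-back while it runs.
 */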
#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

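/*
 * Clear the data and instruction lockdown registers so that no ways are
 * locked when the cache is enabled.  PL310 implements one D/I register
 * pair per bus master (eight pairs), Aurora has four, and L210/L220 a
 * single pair.
 */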
static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	case AURORA_CACHE_ID:
		lockregs = 4;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 cache_id;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	l2x0_base = base;
	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		if ((cache_id & L2X0_CACHE_ID_RTL_MASK) <= L2X0_CACHE_ID_RTL_R3P0)
			outer_cache.set_debug = pl310_set_debug;
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;
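	/*
	 * e.g. an 8-way PL310 whose AUX_CTRL way-size field reads 3 gives
	 * way_size = 1 << (3 + 3) = 64 (KB), so l2x0_size = 8 * 64KB = 512KB.
	 */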

	/*
	 * Check if the l2x0 controller is already enabled.
	 * If you are booting from non-secure mode, accessing
	 * the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	if (!of_init) {
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		outer_cache.sync = l2x0_cache_sync;
		outer_cache.flush_all = l2x0_flush_all;
		outer_cache.disable = l2x0_disable;
	}

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}
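
/*
 * Non-DT platforms call l2x0_init() directly from their machine init
 * code, e.g. (base address and AUX_CTRL values here are illustrative
 * only):
 *
 *	l2x0_init(IOMEM(0xfe600000), 0x30400000, 0x8200c3fe);
 */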

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
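
/*
 * e.g. with 4KB pages, for start = 0x1000fe0 the page-boundary clamp
 * limits any end beyond 0x1001000 to 0x1001000, so each hardware range
 * operation stays within one page (and within MAX_RANGE_SIZE bytes).
 */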

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up to
	 * cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
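
/*
 * e.g. 0x50000000 lies in the SYS EMI window and maps to 0x90000000,
 * while 0xC0001000 (VC EMI, section 3) maps to 0x40001000 once the
 * 32-bit addition wraps.
 */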

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/*
	 * The range crosses sections, which can only be a crossing from
	 * section 2 into section 3.
	 */
	l2x0_inv_range(new_start,
		       bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		       new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/*
	 * The range crosses sections, which can only be a crossing from
	 * section 2 into section 3.
	 */
	l2x0_clean_range(new_start,
			 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
			 new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/*
	 * The range crosses sections, which can only be a crossing from
	 * section 2 into section 3.
	 */
	l2x0_flush_range(new_start,
			 bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
			 new_end);
}

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
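
/*
 * A minimal (hypothetical) device tree node consumed by the parsers
 * above might look like:
 *
 *	l2-cache@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		cache-unified;
 *		cache-level = <2>;
 *		arm,tag-latency = <2 2 2>;
 *		arm,data-latency = <3 3 3>;
 *		arm,filter-ranges = <0x80000000 0x8000000>;
 *	};
 */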

static void __init pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void aurora_save(void)
{
	l2x0_saved_regs.ctrl = readl_relaxed(l2x0_base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
}

static void __init tauros3_save(void)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(l2x0_base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(l2x0_base + L2X0_PREFETCH_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			       l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static void __init aurora_broadcast_l2_commands(void)
{
	__u32 u;
	/* Enable broadcasting of cache commands to L2 */
	__asm__ __volatile__("mrc p15, 1, %0, c15, c2, 0" : "=r"(u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	__asm__ __volatile__("mcr p15, 1, %0, c15, c2, 0\n" : : "r"(u));
	isb();
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.of_parse = aurora_of_parse,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_tauros3_data __initconst = {
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.of_parse = pl310_of_parse,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

		/*
		 * For the Aurora cache in no-outer mode, select the
		 * correct mode using the coprocessor.
		 */
		if (data == &of_aurora_no_outer_data)
			aurora_broadcast_l2_commands();
	}

	if (data->save)
		data->save();

	of_init = true;
	memcpy(&outer_cache, &data->outer_cache, sizeof(outer_cache));
	l2x0_init(l2x0_base, aux_val, aux_mask);

	return 0;
}
#endif