/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

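/*
 * Per-variant initialisation hooks.  Each supported controller fills in
 * the callbacks it needs; __l2c_init() copies .outer_cache into the
 * global outer_cache descriptor once the hardware has been set up.
 */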
struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
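/*
 * Offset of the register written to issue a cache sync.  Defaults to
 * L2X0_CACHE_SYNC, but is redirected for PL310 erratum 753970 and for
 * Aurora controllers (see __l2c_init()).
 */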
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

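/* Perform an operation on all ways at once and wait for it to complete. */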
static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

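/*
 * Clear the D and I lockdown registers for 'num' lockdown register
 * pairs, so that no ways are locked against allocation.
 */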
static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

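/*
 * The range operations below work in blocks of at most 4K, dropping
 * and re-taking l2x0_lock between blocks, so that interrupt latency
 * stays bounded while long ranges are processed.
 */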
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

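/*
 * Unlock all D/I lockdown registers: L310 implementations have eight
 * lockdown register pairs, L210 and unknown parts only one.
 */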
static void l2x0_unlock(u32 cache_id)
{
	int lockregs;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	l2c_unlock(l2x0_base, lockregs);
}

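/*
 * Legacy enable path: unlike l2c_enable(), this writes the auxiliary
 * control register unconditionally and sizes the unlock from the
 * cache ID register rather than from the init data.
 */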
static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2x0_unlock(readl_relaxed(base + L2X0_CACHE_ID));

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		break;
	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		sync_reg_offset = AURORA_SYNC_REG;
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache size = way size * number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = data->outer_cache;

	if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310 &&
	    (cache_id & L2X0_CACHE_ID_RTL_MASK) <= L310_CACHE_ID_RTL_R3P0)
		outer_cache.set_debug = pl310_set_debug;

	pr_info("%s cache controller enabled\n", type);
	pr_info("l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d kB\n",
		ways, cache_id, aux, l2x0_size >> 10);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	__l2c_init(&l2x0_init_fns, aux_val, aux_mask, cache_id);
}

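/*
 * A minimal (hypothetical) non-DT caller sketch: the platform maps the
 * controller and passes the auxiliary control bits it wants set plus a
 * mask of the existing bits to preserve.  PLAT_L2CC_PHYS and the
 * values below are illustrative only, not from any real platform:
 *
 *	void __iomem *l2cc = ioremap(PLAT_L2CC_PHYS, SZ_4K);
 *
 *	if (l2cc)
 *		l2x0_init(l2cc, 0x00400000, 0xfe0fffff);
 */
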
#ifdef CONFIG_OF
static int l2_wt_override;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

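/*
 * Example (hypothetical) device tree node consumed by the parser
 * above; the latencies are cycle counts, and "arm,filter-ranges" is a
 * <base size> pair.  All addresses and values here are illustrative:
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 2>;
 *		arm,filter-ranges = <0x80000000 0x10000000>;
 *	};
 */
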
static void __init pl310_save(void __iomem *base)
{
	u32 l2x0_revision = readl_relaxed(base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is a prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is a power control register
		 */
		if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
				L2X0_POWER_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round start and end addresses up to cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

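/*
 * Save and restore the Aurora control and auxiliary control registers
 * across suspend; the restore path does nothing if the L2 is already
 * enabled.
 */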
static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
				l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_of_parse(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different
 * offsets need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section   Address Range              Offset        EMI
 *   1       0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2       0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3       0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate an address range that starts at
 * 0xBFFF0000 and ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 -
 * 0xBFFFFFFF and 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

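/*
 * Tauros3 suspend support: only the AUX2 and prefetch control
 * registers need saving here; tauros3_resume() restores them and then
 * falls through to the common l2x0_resume().
 */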
static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif