/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};
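
/*
 * Each controller variant below supplies one of these descriptors:
 * l2x0_init()/l2x0_of_init() pick the right one, __l2c_init() applies
 * ->fixup and (for DT) ->of_parse, calls ->enable if the controller is
 * still disabled, and installs ->outer_cache as the outer_cache ops.
 */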

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}
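
/*
 * With the register map from cache-l2x0.h (D lockdown at 0x900, I
 * lockdown at 0x904, stride 8), iteration i above clears the
 * data/instruction lockdown register pair of master i, so 'num' counts
 * lockdown register pairs, not cache ways.
 */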

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}
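
/*
 * The ordering above matters: the auxiliary control register may only
 * be written while the controller is disabled (hence the precondition
 * noted above), the invalidate of all ways plus the explicit sync must
 * complete before the cache is turned on, and IRQs are masked so the
 * invalidate sequence is not interrupted on this CPU.
 */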

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}
119
Catalin Marinas9a6655e2010-08-31 13:05:22 +0100120#ifdef CONFIG_CACHE_PL310
121static inline void cache_wait(void __iomem *reg, unsigned long mask)
122{
123 /* cache operations by line are atomic on PL310 */
124}
125#else
Russell King83841fe2014-03-15 16:48:14 +0000126#define cache_wait l2c_wait_mask
Catalin Marinas9a6655e2010-08-31 13:05:22 +0100127#endif
128
Catalin Marinas382266a2007-02-05 14:48:19 +0100129static inline void cache_sync(void)
130{
Russell King3d107432009-11-19 11:41:09 +0000131 void __iomem *base = l2x0_base;
Srinidhi Kasagar885028e2011-02-17 07:03:51 +0100132
Will Deaconf154fe92012-04-20 17:21:08 +0100133 writel_relaxed(0, base + sync_reg_offset);
Russell King3d107432009-11-19 11:41:09 +0000134 cache_wait(base + L2X0_CACHE_SYNC, 1);
Catalin Marinas382266a2007-02-05 14:48:19 +0100135}
136
Santosh Shilimkar424d6b12010-02-04 19:35:06 +0100137static inline void l2x0_clean_line(unsigned long addr)
138{
139 void __iomem *base = l2x0_base;
140 cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +0100141 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
Santosh Shilimkar424d6b12010-02-04 19:35:06 +0100142}
143
144static inline void l2x0_inv_line(unsigned long addr)
145{
146 void __iomem *base = l2x0_base;
147 cache_wait(base + L2X0_INV_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +0100148 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
Santosh Shilimkar424d6b12010-02-04 19:35:06 +0100149}
150
Santosh Shilimkar2839e062011-03-08 06:59:54 +0100151#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
Will Deaconab4d5362012-04-20 17:22:11 +0100152static inline void debug_writel(unsigned long val)
153{
154 if (outer_cache.set_debug)
Russell King2b2a87a2014-03-16 17:19:21 +0000155 l2c_set_debug(l2x0_base, val);
Will Deaconab4d5362012-04-20 17:22:11 +0100156}
Santosh Shilimkar2839e062011-03-08 06:59:54 +0100157#else
158/* Optimised out for non-errata case */
159static inline void debug_writel(unsigned long val)
160{
Santosh Shilimkar9e655822010-02-04 19:42:42 +0100161}
Santosh Shilimkar2839e062011-03-08 06:59:54 +0100162#endif
163
164#ifdef CONFIG_PL310_ERRATA_588369
Santosh Shilimkar9e655822010-02-04 19:42:42 +0100165static inline void l2x0_flush_line(unsigned long addr)
166{
167 void __iomem *base = l2x0_base;
168
169 /* Clean by PA followed by Invalidate by PA */
170 cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +0100171 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
Santosh Shilimkar9e655822010-02-04 19:42:42 +0100172 cache_wait(base + L2X0_INV_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +0100173 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
Santosh Shilimkar9e655822010-02-04 19:42:42 +0100174}
175#else
176
Santosh Shilimkar424d6b12010-02-04 19:35:06 +0100177static inline void l2x0_flush_line(unsigned long addr)
178{
179 void __iomem *base = l2x0_base;
180 cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +0100181 writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
Santosh Shilimkar424d6b12010-02-04 19:35:06 +0100182}
Santosh Shilimkar9e655822010-02-04 19:42:42 +0100183#endif
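
/*
 * Erratum 588369 (see the L2C-310 notes below): on affected parts the
 * atomic clean+invalidate by PA skips the invalidate step, so the
 * workaround build above issues a separate clean then invalidate, and
 * the callers bracket it with debug_writel(0x03)/debug_writel(0x00).
 */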

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
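
/*
 * The range operations above and below work a cache line at a time; the
 * spinlock is dropped and re-taken every 4096 bytes so that a long
 * clean/invalidate does not keep other CPUs spinning on the lock (and,
 * since the lock is taken with IRQs saved, does not keep interrupts
 * masked) for the whole range.
 */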

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned id;

	id = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
	if (id == L2X0_CACHE_ID_PART_L310)
		num_lock = 8;
	else
		num_lock = 1;

	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2c_unlock(base, num_lock);

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2x0_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2x0_enable(base, l2x0_saved_regs.aux_ctrl, 0);
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

/*
 * L2C-210 specific code.
 *
 * The L2C-2x0 PA, set/way and sync operations are atomic, but we must
 * ensure that no background operation is running.  The way operations
 * are all background tasks.
 *
 * While a background operation is in progress, any new operation is
 * ignored (it is unspecified whether this causes an error).  Thankfully,
 * these operations are not used on SMP.
 *
 * The L2C-210 never has a sync register other than L2X0_CACHE_SYNC, but
 * we use sync_reg_offset here so we can share some of this with the
 * L2C-310.
 */
static void __l2c210_cache_sync(void __iomem *base)
{
	writel_relaxed(0, base + sync_reg_offset);
}

static void __l2c210_op_pa_range(void __iomem *reg, unsigned long start,
	unsigned long end)
{
	while (start < end) {
		writel_relaxed(start, reg);
		start += CACHE_LINE_SIZE;
	}
}

static void l2c210_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, base + L2X0_CLEAN_INV_LINE_PA);
	}

	__l2c210_op_pa_range(base + L2X0_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;

	start &= ~(CACHE_LINE_SIZE - 1);
	__l2c210_op_pa_range(base + L2X0_CLEAN_INV_LINE_PA, start, end);
	__l2c210_cache_sync(base);
}

static void l2c210_flush_all(void)
{
	void __iomem *base = l2x0_base;

	BUG_ON(!irqs_disabled());

	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	__l2c210_cache_sync(base);
}

static void l2c210_sync(void)
{
	__l2c210_cache_sync(l2x0_base);
}

static void l2c210_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 1);
}

static const struct l2c_init_data l2c210_data __initconst = {
	.num_lock = 1,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void l2c310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}

static void l2c310_flush_all_erratum(void)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	l2c_set_debug(base, 0x03);
	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
	l2c_set_debug(base, 0x00);
	__l2c210_cache_sync(base);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}
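
/*
 * Writing 0x03 to the debug control register sets its DCL and DWB bits
 * (disable cache linefill, disable write-back), which closes the window
 * in which erratum 727915 could lose a store that hits a line during
 * the background clean+invalidate by way.
 */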

static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L2X0_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				       base + L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			writel_relaxed(l2x0_saved_regs.pwr_ctrl,
				       base + L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = l2c310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_727915) &&
	    revision >= L310_CACHE_ID_RTL_R2P0 &&
	    revision < L310_CACHE_ID_RTL_R3P1) {
		fns->flush_all = l2c310_flush_all_erratum;
		errata[n++] = "727915";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2c310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
		data = &l2x0_init_fns;
		break;

	case L2X0_CACHE_ID_PART_L210:
		data = &l2c210_data;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/* Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
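
/*
 * A device tree node parsed by the above might look like this (all
 * values are illustrative, not a recommendation for any particular
 * SoC):
 *
 *	L2: cache-controller@fff10000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff10000 0x1000>;
 *		arm,tag-latency = <2>;
 *		arm,data-latency = <2 2>;
 *		arm,dirty-latency = <2>;
 *	};
 */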

static const struct l2c_init_data of_l2c210_data __initconst = {
	.num_lock = 1,
	.of_parse = l2x0_of_parse,
	.enable = l2c_enable,
	.outer_cache = {
		.inv_range = l2c210_inv_range,
		.clean_range = l2c210_clean_range,
		.flush_range = l2c210_flush_range,
		.flush_all = l2c210_flush_all,
		.disable = l2c_disable,
		.sync = l2c210_sync,
		.resume = l2c210_resume,
	},
};

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static void __init pl310_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
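
/*
 * For a PL310 the latency properties take <read write setup> triplets
 * and "arm,filter-ranges" takes <base size>; an illustrative node
 * (again, example values only):
 *
 *	L2: cache-controller@1e00a000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0x1e00a000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <1 2 1>;
 *		arm,filter-ranges = <0x80000000 0x40000000>;
 *	};
 */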

static const struct l2c_init_data of_pl310_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
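
/*
 * Worked example (4 KiB pages, MAX_RANGE_SIZE from cache-aurora-l2.h):
 * for start = 0x10000f80 and a large 'end', the MAX_RANGE_SIZE cap
 * gives 0x10001380, but PAGE_ALIGN(start + 1) = 0x10001000 wins, so
 * the callers below advance page fragment by page fragment.
 */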

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * round start down and end up to the cache line size
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
	u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, to invalidate a range that starts at 0xBFFF0000 and ends
 * at 0xC0001000, we invalidate 1) 0xBFFF0000 - 0xBFFFFFFF and 2)
 * 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}
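
/*
 * For example, the section 2 address 0x40000000 maps to 0x40000000 +
 * BCM_SYS_EMI_OFFSET = 0x80000000, while the section 3 address
 * 0xC0000000 maps via BCM_VC_EMI_OFFSET; on a 32-bit unsigned long the
 * sum wraps to 0x40000000.
 */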

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2c310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L2X0_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2c210_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif
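
/*
 * Typical use (an illustrative sketch, not part of this file): a
 * platform's .init_machine hook calls l2x0_of_init() with the auxiliary
 * control bits it wants to force and a mask of the bits to preserve,
 * e.g. l2x0_of_init(0, ~0UL) to take all settings from the device tree
 * and the hardware defaults.
 */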