/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

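/*
 * Per-controller initialisation descriptor: how to parse the DT node,
 * how to enable the cache, any revision-specific fixups to the outer
 * cache methods, what state to save for resume, and the outer_cache
 * operations themselves.  See the l2c_init_data instances below for
 * concrete examples.
 */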
struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_unlock(u32 cache_id)
{
	int lockregs;

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		lockregs = 8;
		break;
	default:
		/* L210 and unknown types */
		lockregs = 1;
		break;
	}

	l2c_unlock(l2x0_base, lockregs);
}

static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2x0_unlock(readl_relaxed(base + L2X0_CACHE_ID));

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			       L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(L2X0_CTRL_EN, l2x0_base + L2X0_CTRL);
	}
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
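/*
 * The 588369 and 727915 workarounds are selected at build time via the
 * CONFIG_PL310_ERRATA_* blocks earlier in this file; the revision-dependent
 * pieces (the debug register hook and the 753970 alternative sync register)
 * are applied at runtime by l2c310_fixup() below.
 */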
static void __init pl310_save(void __iomem *base)
{
	u32 l2x0_revision = readl_relaxed(base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
				L2X0_POWER_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L310_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L310_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = pl310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;
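
	/*
	 * Illustrative example: a PL310 (way_size_shift == 3) with way-size
	 * field 2 in aux[19:17] and 16 ways gives way_size = 1 << (2 + 3)
	 * = 32 (KB), hence l2x0_size = 16 * 32 KB = 512 KB.
	 */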

	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if l2x0 controller is already enabled.  If we are booting
	 * in non-secure mode accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
		data = &l2x0_init_fns;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}
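
/*
 * Typical non-DT usage (illustrative, with made-up values): a platform
 * would ioremap the controller and call
 *
 *	l2x0_init(l2x0_virt_base, 0x00400000, 0xfe0fffff);
 *
 * where aux_val/aux_mask select which auxiliary control bits to force
 * and which to preserve.
 */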

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
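
/*
 * An L210/L220 device tree node might look like this (illustrative
 * values):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,l220-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <2>;
 *		arm,data-latency = <2 2>;
 *		arm,dirty-latency = <2>;
 *	};
 */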

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
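
/*
 * A PL310 node carries three-cell latencies (read, write, setup) and an
 * optional <base size> filter range (illustrative values):
 *
 *	L2: cache-controller@1e00a000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0x1e00a000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <1 2 1>;
 *		arm,filter-ranges = <0x80000000 0x40000000>;
 *	};
 */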

static const struct l2c_init_data of_pl310_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}
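
/*
 * Illustrative example: with 4 KiB pages, calc_range_end(0x10000fe0,
 * 0x10003000) returns 0x10001000, the next page boundary, so callers
 * walk a large range in page-bounded chunks of at most MAX_RANGE_SIZE.
 */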

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round the start address down and the end address up to
	 * cache line size.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	if (!(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl,
			       l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, l2x0_base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				   u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			     &cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}
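
/*
 * An Aurora node typically looks like this (illustrative, modelled on
 * mvebu platforms):
 *
 *	l2: cache-controller@8000 {
 *		compatible = "marvell,aurora-outer-cache";
 *		reg = <0x08000 0x1000>;
 *		cache-id-part = <0x100>;
 *		wt-override;
 *	};
 */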

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section Address Range              Offset        EMI
 *   1     0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2     0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3     0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses fall in two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate addresses starting at 0xBFFF0000
 * and ending at 0xC0001000, we need to invalidate 1) 0xBFFF0000 - 0xBFFFFFFF
 * and 2) 0xC0000000 - 0xC0001000
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR        0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR    0xC0000000UL

#define BCM_SYS_EMI_OFFSET            0x40000000UL
#define BCM_VC_EMI_OFFSET             0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/* They cross sections, so it can only be a cross from section
	 * 2 to section 3
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = pl310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = pl310_resume,
	},
};

static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       l2x0_base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       l2x0_base + L2X0_PREFETCH_CTRL);
	}

	l2x0_resume();
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif