/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include "cache-tauros3.h"
#include "cache-aurora-l2.h"

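/*
 * Per-variant initialisation descriptor.  Each supported controller
 * (L2x0/L210/L220, L2C-310, Aurora, Tauros3, ...) supplies one of
 * these so the common init code can parse DT properties, enable the
 * hardware and install the right outer_cache operations.
 */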
struct l2c_init_data {
	unsigned num_lock;
	void (*of_parse)(const struct device_node *, u32 *, u32 *);
	void (*enable)(void __iomem *, u32, unsigned);
	void (*fixup)(void __iomem *, u32, struct outer_cache_fns *);
	void (*save)(void __iomem *);
	struct outer_cache_fns outer_cache;
};

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;

struct l2x0_regs l2x0_saved_regs;

/*
 * Common code for all cache controllers.
 */
static inline void l2c_wait_mask(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

/*
 * This should only be called when we have a requirement that the
 * register be written due to a work-around, as platforms running
 * in non-secure mode may not be able to access this register.
 */
static inline void l2c_set_debug(void __iomem *base, unsigned long val)
{
	outer_cache.set_debug(val);
}

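/*
 * Start a background operation on all active ways (l2x0_way_mask
 * written to one of the *_WAY registers) and wait until the
 * controller reports that every way has completed.
 */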
static void __l2c_op_way(void __iomem *reg)
{
	writel_relaxed(l2x0_way_mask, reg);
	l2c_wait_mask(reg, l2x0_way_mask);
}

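/*
 * Clear the data and instruction lockdown registers for the first
 * 'num' register pairs, so no ways are locked down when the
 * controller is (re)enabled.
 */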
static inline void l2c_unlock(void __iomem *base, unsigned num)
{
	unsigned i;

	for (i = 0; i < num; i++) {
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0, base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

/*
 * Enable the L2 cache controller.  This function must only be
 * called when the cache controller is known to be disabled.
 */
static void l2c_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned long flags;

	/* Only write the aux register if it needs changing */
	if (readl_relaxed(base + L2X0_AUX_CTRL) != aux)
		writel_relaxed(aux, base + L2X0_AUX_CTRL);

	l2c_unlock(base, num_lock);

	local_irq_save(flags);
	__l2c_op_way(base + L2X0_INV_WAY);
	writel_relaxed(0, base + sync_reg_offset);
	l2c_wait_mask(base + sync_reg_offset, 1);
	local_irq_restore(flags);

	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

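/*
 * Disable the cache controller: flush anything dirty back to RAM
 * first, then clear the enable bit and drain the write buffer.
 */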
static void l2c_disable(void)
{
	void __iomem *base = l2x0_base;

	outer_cache.flush_all();
	writel_relaxed(0, base + L2X0_CTRL);
	dsb(st);
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	l2c_wait_mask
#endif

static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		l2c_set_debug(l2x0_base, val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

static void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2c_op_way(l2x0_base + L2X0_CLEAN_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN);
	__l2c_op_way(l2x0_base + L2X0_INV_WAY);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

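	/*
	 * Invalidate in blocks of at most 4096 bytes, releasing and
	 * re-taking the lock between blocks so interrupt latency stays
	 * bounded on large ranges.
	 */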
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb(st);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

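/*
 * Legacy enable path: the descriptor's num_lock is not reliable here,
 * so rederive it from the part number (the L310 has eight lockdown
 * register pairs, other parts are assumed to have one).
 */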
static void l2x0_enable(void __iomem *base, u32 aux, unsigned num_lock)
{
	unsigned id;

	id = readl_relaxed(base + L2X0_CACHE_ID) & L2X0_CACHE_ID_PART_MASK;
	if (id == L2X0_CACHE_ID_PART_L310)
		num_lock = 8;
	else
		num_lock = 1;

	/* l2x0 controller is disabled */
	writel_relaxed(aux, base + L2X0_AUX_CTRL);

	/* Make sure that I&D is not locked down when starting */
	l2c_unlock(base, num_lock);

	l2x0_inv_all();

	/* enable L2X0 */
	writel_relaxed(L2X0_CTRL_EN, base + L2X0_CTRL);
}

static void l2x0_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN))
		l2x0_enable(base, l2x0_saved_regs.aux_ctrl, 0);
}

static const struct l2c_init_data l2x0_init_fns __initconst = {
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

/*
 * L2C-310 specific code.
 *
 * Errata:
 * 588369: PL310 R0P0->R1P0, fixed R2P0.
 *	Affects: all clean+invalidate operations
 *	clean and invalidate skips the invalidate step, so we need to issue
 *	separate operations.  We also require the above debug workaround
 *	enclosing this code fragment on affected parts.  On unaffected parts,
 *	we must not use this workaround without the debug register writes
 *	to avoid exposing a problem similar to 727915.
 *
 * 727915: PL310 R2P0->R3P0, fixed R3P1.
 *	Affects: clean+invalidate by way
 *	clean and invalidate by way runs in the background, and a store can
 *	hit the line between the clean operation and invalidate operation,
 *	resulting in the store being lost.
 *
 * 753970: PL310 R3P0, fixed R3P1.
 *	Affects: sync
 *	prevents merging writes after the sync operation, until another L2C
 *	operation is performed (or a number of other conditions.)
 *
 * 769419: PL310 R0P0->R3P1, fixed R3P2.
 *	Affects: store buffer
 *	store buffer is not automatically drained.
 */
static void __init l2c310_save(void __iomem *base)
{
	unsigned revision;

	l2x0_saved_regs.tag_latency = readl_relaxed(base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(base +
		L2X0_ADDR_FILTER_START);

	revision = readl_relaxed(base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

	/* From r2p0, there is Prefetch offset/control register */
	if (revision >= L310_CACHE_ID_RTL_R2P0)
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(base +
			L2X0_PREFETCH_CTRL);

	/* From r3p0, there is Power control register */
	if (revision >= L310_CACHE_ID_RTL_R3P0)
		l2x0_saved_regs.pwr_ctrl = readl_relaxed(base +
			L2X0_POWER_CTRL);
}

static void l2c310_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		unsigned revision;

		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			       base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			       base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			       base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			       base + L2X0_ADDR_FILTER_START);

		revision = readl_relaxed(base + L2X0_CACHE_ID) &
				L2X0_CACHE_ID_RTL_MASK;

		if (revision >= L310_CACHE_ID_RTL_R2P0)
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				       base + L2X0_PREFETCH_CTRL);
		if (revision >= L310_CACHE_ID_RTL_R3P0)
			writel_relaxed(l2x0_saved_regs.pwr_ctrl,
				       base + L2X0_POWER_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static void __init l2c310_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	unsigned revision = cache_id & L2X0_CACHE_ID_RTL_MASK;
	const char *errata[4];
	unsigned n = 0;

	if (revision <= L310_CACHE_ID_RTL_R3P0)
		fns->set_debug = pl310_set_debug;

	if (IS_ENABLED(CONFIG_PL310_ERRATA_753970) &&
	    revision == L310_CACHE_ID_RTL_R3P0) {
		sync_reg_offset = L2X0_DUMMY_REG;
		errata[n++] = "753970";
	}

	if (IS_ENABLED(CONFIG_PL310_ERRATA_769419))
		errata[n++] = "769419";

	if (n) {
		unsigned i;

		pr_info("L2C-310 errat%s", n > 1 ? "a" : "um");
		for (i = 0; i < n; i++)
			pr_cont(" %s", errata[i]);
		pr_cont(" enabled\n");
	}
}

static const struct l2c_init_data l2c310_init_fns __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2c310_resume,
	},
};

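/*
 * Common initialisation: combine the caller's auxiliary control
 * value/mask with the hardware register, derive the cache geometry
 * from the cache ID, apply any variant fixups, enable the controller
 * if it is not already enabled, and install the outer_cache methods.
 */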
static void __init __l2c_init(const struct l2c_init_data *data,
	u32 aux_val, u32 aux_mask, u32 cache_id)
{
	struct outer_cache_fns fns;
	u32 aux;
	u32 way_size = 0;
	int ways;
	int way_size_shift = L2X0_WAY_SIZE_SHIFT;
	const char *type;

	/*
	 * It is strange to save the register state before initialisation,
	 * but hey, this is what the DT implementations decided to do.
	 */
	if (data->save)
		data->save(l2x0_base);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			ways = 16;
		else
			ways = 8;
		type = "L310";
		break;

	case L2X0_CACHE_ID_PART_L210:
		ways = (aux >> 13) & 0xf;
		type = "L210";
		break;

	case AURORA_CACHE_ID:
		ways = (aux >> 13) & 0xf;
		ways = 2 << ((ways + 1) >> 2);
		way_size_shift = AURORA_WAY_SIZE_SHIFT;
		type = "Aurora";
		break;

	default:
		/* Assume unknown chips have 8 ways */
		ways = 8;
		type = "L2x0 series";
		break;
	}

	l2x0_way_mask = (1 << ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = 1 << (way_size + way_size_shift);

	l2x0_size = ways * way_size * SZ_1K;

	fns = data->outer_cache;
	if (data->fixup)
		data->fixup(l2x0_base, cache_id, &fns);

	/*
	 * Check if the l2x0 controller is already enabled.  If we are
	 * booting in non-secure mode, accessing the registers below
	 * will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		data->enable(l2x0_base, aux, data->num_lock);

	/* Re-read it in case some bits are reserved. */
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	/* Save the value for resuming. */
	l2x0_saved_regs.aux_ctrl = aux;

	outer_cache = fns;

	pr_info("%s cache controller enabled, %d ways, %d kB\n",
		type, ways, l2x0_size >> 10);
	pr_info("%s: CACHE_ID 0x%08x, AUX_CTRL 0x%08x\n",
		type, cache_id, aux);
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	u32 cache_id;

	l2x0_base = base;

	cache_id = readl_relaxed(base + L2X0_CACHE_ID);

	switch (cache_id & L2X0_CACHE_ID_PART_MASK) {
	default:
		data = &l2x0_init_fns;
		break;

	case L2X0_CACHE_ID_PART_L310:
		data = &l2c310_init_fns;
		break;
	}

	__l2c_init(data, aux_val, aux_mask, cache_id);
}

#ifdef CONFIG_OF
static int l2_wt_override;

/*
 * Aurora doesn't have the cache ID register available, so we have to
 * pass it through the device tree.
 */
static u32 cache_id_part_number_from_dt;

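/*
 * Translate the generic DT latency properties (arm,tag-latency,
 * arm,data-latency, arm,dirty-latency) into auxiliary control
 * register value/mask bits.  The properties give cycle counts; the
 * register encodes each latency as cycles minus one.
 */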
static void __init l2x0_of_parse(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_l2x0_data __initconst = {
	.of_parse = l2x0_of_parse,
	.enable = l2x0_enable,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2x0_resume,
	},
};

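/*
 * Parse the PL310-specific DT properties and program the latency and
 * address filter registers directly.  A hypothetical node (property
 * names per the binding, values made up for illustration) could look
 * like:
 *
 *	L2: cache-controller@fff10000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff10000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <1 2 1>;
 *		arm,filter-ranges = <0x80000000 0x8000000>;
 *	};
 *
 * Each latency triplet is <read write setup>, in cycles; the filter
 * property is <base length>.
 */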
static void __init pl310_of_parse(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static const struct l2c_init_data of_pl310_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = l2x0_inv_range,
		.clean_range = l2x0_clean_range,
		.flush_range = l2x0_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2c310_resume,
	},
};

/*
 * Note that the end addresses passed to Linux primitives are
 * noninclusive, while the hardware cache range operations use
 * inclusive start and end addresses.
 */
static unsigned long calc_range_end(unsigned long start, unsigned long end)
{
	/*
	 * Limit the number of cache lines processed at once,
	 * since cache range operations stall the CPU pipeline
	 * until completion.
	 */
	if (end > start + MAX_RANGE_SIZE)
		end = start + MAX_RANGE_SIZE;

	/*
	 * Cache range operations can't straddle a page boundary.
	 */
	if (end > PAGE_ALIGN(start+1))
		end = PAGE_ALIGN(start+1);

	return end;
}

/*
 * Make sure 'start' and 'end' reference the same page, as L2 is PIPT
 * and range operations only do a TLB lookup on the start address.
 */
static void aurora_pa_range(unsigned long start, unsigned long end,
			    unsigned long offset)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	writel_relaxed(start, l2x0_base + AURORA_RANGE_BASE_ADDR_REG);
	writel_relaxed(end, l2x0_base + offset);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);

	cache_sync();
}

static void aurora_inv_range(unsigned long start, unsigned long end)
{
	/*
	 * Round start and end addresses to cache line boundaries.
	 */
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);

	/*
	 * Invalidate all full cache lines between 'start' and 'end'.
	 */
	while (start < end) {
		unsigned long range_end = calc_range_end(start, end);
		aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
				AURORA_INVAL_RANGE_REG);
		start = range_end;
	}
}

static void aurora_clean_range(unsigned long start, unsigned long end)
{
	/*
	 * If L2 is forced to WT, the L2 will always be clean and we
	 * don't need to do anything here.
	 */
	if (!l2_wt_override) {
		start &= ~(CACHE_LINE_SIZE - 1);
		end = ALIGN(end, CACHE_LINE_SIZE);
		while (start != end) {
			unsigned long range_end = calc_range_end(start, end);
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_CLEAN_RANGE_REG);
			start = range_end;
		}
	}
}

static void aurora_flush_range(unsigned long start, unsigned long end)
{
	start &= ~(CACHE_LINE_SIZE - 1);
	end = ALIGN(end, CACHE_LINE_SIZE);
	while (start != end) {
		unsigned long range_end = calc_range_end(start, end);
		/*
		 * If L2 is forced to WT, the L2 will always be clean and we
		 * just need to invalidate.
		 */
		if (l2_wt_override)
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_INVAL_RANGE_REG);
		else
			aurora_pa_range(start, range_end - CACHE_LINE_SIZE,
					AURORA_FLUSH_RANGE_REG);
		start = range_end;
	}
}

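/*
 * Aurora suspend/resume support: only the control and auxiliary
 * control registers are saved; resume rewrites them if the cache
 * comes back up disabled.
 */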
static void aurora_save(void __iomem *base)
{
	l2x0_saved_regs.ctrl = readl_relaxed(base + L2X0_CTRL);
	l2x0_saved_regs.aux_ctrl = readl_relaxed(base + L2X0_AUX_CTRL);
}

static void aurora_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux_ctrl, base + L2X0_AUX_CTRL);
		writel_relaxed(l2x0_saved_regs.ctrl, base + L2X0_CTRL);
	}
}

/*
 * For Aurora cache in no outer mode, enable via the CP15 coprocessor
 * broadcasting of cache commands to L2.
 */
static void __init aurora_enable_no_outer(void __iomem *base, u32 aux,
	unsigned num_lock)
{
	u32 u;

	asm volatile("mrc p15, 1, %0, c15, c2, 0" : "=r" (u));
	u |= AURORA_CTRL_FW;		/* Set the FW bit */
	asm volatile("mcr p15, 1, %0, c15, c2, 0" : : "r" (u));

	isb();

	l2c_enable(base, aux, num_lock);
}

static void __init aurora_fixup(void __iomem *base, u32 cache_id,
	struct outer_cache_fns *fns)
{
	sync_reg_offset = AURORA_SYNC_REG;
}

static void __init aurora_of_parse(const struct device_node *np,
				u32 *aux_val, u32 *aux_mask)
{
	u32 val = AURORA_ACR_REPLACEMENT_TYPE_SEMIPLRU;
	u32 mask = AURORA_ACR_REPLACEMENT_MASK;

	of_property_read_u32(np, "cache-id-part",
			&cache_id_part_number_from_dt);

	/* Determine and save the write policy */
	l2_wt_override = of_property_read_bool(np, "wt-override");

	if (l2_wt_override) {
		val |= AURORA_ACR_FORCE_WRITE_THRO_POLICY;
		mask |= AURORA_ACR_FORCE_WRITE_POLICY_MASK;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static const struct l2c_init_data of_aurora_with_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = l2c_enable,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.inv_range = aurora_inv_range,
		.clean_range = aurora_clean_range,
		.flush_range = aurora_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = aurora_resume,
	},
};

static const struct l2c_init_data of_aurora_no_outer_data __initconst = {
	.num_lock = 4,
	.of_parse = aurora_of_parse,
	.enable = aurora_enable_no_outer,
	.fixup = aurora_fixup,
	.save = aurora_save,
	.outer_cache = {
		.resume = aurora_resume,
	},
};

/*
 * For certain Broadcom SoCs, depending on the address range, different offsets
 * need to be added to the address before passing it to L2 for
 * invalidation/clean/flush
 *
 * Section   Address Range              Offset        EMI
 *   1       0x00000000 - 0x3FFFFFFF    0x80000000    VC
 *   2       0x40000000 - 0xBFFFFFFF    0x40000000    SYS
 *   3       0xC0000000 - 0xFFFFFFFF    0x80000000    VC
 *
 * When the start and end addresses have crossed two different sections, we
 * need to break the L2 operation into two, each within its own section.
 * For example, if we need to invalidate an address range that starts at
 * 0xBFFF0000 and ends at 0xC0001000, we need to invalidate 1) 0xBFFF0000 -
 * 0xBFFFFFFF and 2) 0xC0000000 - 0xC0001000.
 *
 * Note 1:
 * By breaking a single L2 operation into two, we may potentially suffer some
 * performance hit, but keep in mind the cross section case is very rare
 *
 * Note 2:
 * We do not need to handle the case when the start address is in
 * Section 1 and the end address is in Section 3, since it is not a valid use
 * case
 *
 * Note 3:
 * Section 1 in practical terms can no longer be used on rev A2. Because of
 * that the code does not need to handle section 1 at all.
 *
 */
#define BCM_SYS_EMI_START_ADDR		0x40000000UL
#define BCM_VC_EMI_SEC3_START_ADDR	0xC0000000UL

#define BCM_SYS_EMI_OFFSET		0x40000000UL
#define BCM_VC_EMI_OFFSET		0x80000000UL

static inline int bcm_addr_is_sys_emi(unsigned long addr)
{
	return (addr >= BCM_SYS_EMI_START_ADDR) &&
		(addr < BCM_VC_EMI_SEC3_START_ADDR);
}

static inline unsigned long bcm_l2_phys_addr(unsigned long addr)
{
	if (bcm_addr_is_sys_emi(addr))
		return addr + BCM_SYS_EMI_OFFSET;
	else
		return addr + BCM_VC_EMI_OFFSET;
}

static void bcm_inv_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_inv_range(new_start, new_end);
		return;
	}

	/*
	 * The range crosses sections, so it can only be a crossing from
	 * section 2 into section 3.
	 */
	l2x0_inv_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_inv_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_clean_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_clean_range(new_start, new_end);
		return;
	}

	/*
	 * The range crosses sections, so it can only be a crossing from
	 * section 2 into section 3.
	 */
	l2x0_clean_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_clean_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static void bcm_flush_range(unsigned long start, unsigned long end)
{
	unsigned long new_start, new_end;

	BUG_ON(start < BCM_SYS_EMI_START_ADDR);

	if (unlikely(end <= start))
		return;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	new_start = bcm_l2_phys_addr(start);
	new_end = bcm_l2_phys_addr(end);

	/* normal case, no cross section between start and end */
	if (likely(bcm_addr_is_sys_emi(end) || !bcm_addr_is_sys_emi(start))) {
		l2x0_flush_range(new_start, new_end);
		return;
	}

	/*
	 * The range crosses sections, so it can only be a crossing from
	 * section 2 into section 3.
	 */
	l2x0_flush_range(new_start,
		bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR-1));
	l2x0_flush_range(bcm_l2_phys_addr(BCM_VC_EMI_SEC3_START_ADDR),
		new_end);
}

static const struct l2c_init_data of_bcm_l2x0_data __initconst = {
	.num_lock = 8,
	.of_parse = pl310_of_parse,
	.enable = l2c_enable,
	.fixup = l2c310_fixup,
	.save = l2c310_save,
	.outer_cache = {
		.inv_range = bcm_inv_range,
		.clean_range = bcm_clean_range,
		.flush_range = bcm_flush_range,
		.flush_all = l2x0_flush_all,
		.disable = l2x0_disable,
		.sync = l2x0_cache_sync,
		.resume = l2c310_resume,
	},
};

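/*
 * Tauros3 keeps its extra state in the auxiliary control 2 and
 * prefetch control registers; save both so resume can restore them
 * before re-enabling the controller.
 */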
static void __init tauros3_save(void __iomem *base)
{
	l2x0_saved_regs.aux2_ctrl =
		readl_relaxed(base + TAUROS3_AUX2_CTRL);
	l2x0_saved_regs.prefetch_ctrl =
		readl_relaxed(base + L2X0_PREFETCH_CTRL);
}

static void tauros3_resume(void)
{
	void __iomem *base = l2x0_base;

	if (!(readl_relaxed(base + L2X0_CTRL) & L2X0_CTRL_EN)) {
		writel_relaxed(l2x0_saved_regs.aux2_ctrl,
			       base + TAUROS3_AUX2_CTRL);
		writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
			       base + L2X0_PREFETCH_CTRL);

		l2c_enable(base, l2x0_saved_regs.aux_ctrl, 8);
	}
}

static const struct l2c_init_data of_tauros3_data __initconst = {
	.num_lock = 8,
	.enable = l2c_enable,
	.save = tauros3_save,
	/* Tauros3 broadcasts L1 cache operations to L2 */
	.outer_cache = {
		.resume = tauros3_resume,
	},
};

#define L2C_ID(name, fns) { .compatible = name, .data = (void *)&fns }
static const struct of_device_id l2x0_ids[] __initconst = {
	L2C_ID("arm,l210-cache", of_l2x0_data),
	L2C_ID("arm,l220-cache", of_l2x0_data),
	L2C_ID("arm,pl310-cache", of_pl310_data),
	L2C_ID("brcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	L2C_ID("marvell,aurora-outer-cache", of_aurora_with_outer_data),
	L2C_ID("marvell,aurora-system-cache", of_aurora_no_outer_data),
	L2C_ID("marvell,tauros3-cache", of_tauros3_data),
	/* Deprecated IDs */
	L2C_ID("bcm,bcm11351-a2-pl310-cache", of_bcm_l2x0_data),
	{}
};

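/*
 * Entry point for DT platforms.  A machine's init code typically
 * calls this as, for example, l2x0_of_init(0, ~0UL); the hardware
 * auxiliary control value is ANDed with aux_mask and ORed with
 * aux_val before the controller is enabled.
 */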
int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	const struct l2c_init_data *data;
	struct device_node *np;
	struct resource res;
	u32 cache_id;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & L2X0_CTRL_EN))
		if (data->of_parse)
			data->of_parse(np, &aux_val, &aux_mask);

	if (cache_id_part_number_from_dt)
		cache_id = cache_id_part_number_from_dt;
	else
		cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	__l2c_init(data, aux_val, aux_mask, cache_id);

	return 0;
}
#endif