/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 * Copyright (c) 2009, 2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static uint32_t aux_ctrl_save;
static uint32_t data_latency_ctrl;
static DEFINE_RAW_SPINLOCK(l2x0_lock);

static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;
static u32 l2x0_cache_id;
static unsigned int l2x0_sets;
static unsigned int l2x0_ways;

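/*
 * Match both the part number and the revision field of the cache ID so
 * that revision-specific errata workarounds (e.g. for PL310 r2p0) only
 * trigger on the affected controller.
 */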
static inline bool is_pl310_rev(int rev)
{
	return (l2x0_cache_id &
		(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
			(L2X0_CACHE_ID_PART_L310 | rev);
}

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

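/*
 * Drain outstanding operations in the controller.  With ARM_ERRATA_753970
 * enabled the sync is issued through a dummy register address instead of
 * the Cache Sync register.
 */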
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
	/* write to an unmapped register */
	writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

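/*
 * Errata 588369 and 727915 are worked around by wrapping the affected
 * maintenance operations in writes to the PL310 debug control register:
 * 0x03 is written before the operation and 0x00 afterwards.  When neither
 * erratum is enabled, debug_writel() compiles away to nothing.
 */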
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define l2x0_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

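/*
 * outer_cache.sync entry point: a full cache sync performed under
 * l2x0_lock so that it serialises with the other maintenance operations
 * issued in this file.
 */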
void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

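/*
 * PL310 erratum 727915 workaround: the background clean/flush by way
 * operation can corrupt data, so instead walk every set of every way
 * explicitly through the index/way maintenance register, taking the lock
 * and issuing a sync per way to bound the time spent with interrupts
 * disabled.
 */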
#ifdef CONFIG_PL310_ERRATA_727915
static void l2x0_for_each_set_way(void __iomem *reg)
{
	int set;
	int way;
	unsigned long flags;

	for (way = 0; way < l2x0_ways; way++) {
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		for (set = 0; set < l2x0_sets; set++)
			writel_relaxed((way << 28) | (set << 5), reg);
		cache_sync();
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}
}
#endif

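/* Must be called with l2x0_lock held and interrupts disabled. */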
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
		return;
	}
#endif

	/* clean and invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a no-no */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

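/*
 * Range operations work on physical addresses.  Partial cache lines at
 * the start/end of an invalidate range must be cleaned and invalidated
 * (not just invalidated) so that unrelated dirty data sharing the line is
 * not discarded.  Ranges are processed in 4K blocks, dropping and
 * re-taking the lock between blocks to keep interrupt latency bounded.
 */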
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

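/*
 * Lock-free "atomic" variants: each line operation is issued directly to
 * the controller and completes atomically by itself, so no spinlock is
 * taken.  These are installed for L210 (and unknown) parts in l2x0_init()
 * below.
 */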
static void l2x0_inv_range_atomic(unsigned long start, unsigned long end)
{
	unsigned long addr;

	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(start, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		writel_relaxed(end, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
	}

	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
		writel_relaxed(addr, l2x0_base + L2X0_INV_LINE_PA);

	mb();
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range_atomic(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(CACHE_LINE_SIZE - 1);
	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
		writel_relaxed(addr, l2x0_base + L2X0_CLEAN_LINE_PA);

	mb();
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

void l2x0_flush_range_atomic(unsigned long start, unsigned long end)
{
	unsigned long addr;

	start &= ~(CACHE_LINE_SIZE - 1);
	for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
		writel_relaxed(addr, l2x0_base + L2X0_CLEAN_INV_LINE_PA);

	mb();
}

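/*
 * Flush everything and turn the controller off.  Done under l2x0_lock so
 * no other maintenance operation can race with the disable.
 */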
static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

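/*
 * Probe and enable the L2 cache controller:
 *  - disable the cache, then program the auxiliary control register
 *    from (current_value & aux_mask) | aux_val
 *  - work out the number of ways, sets and the total size from the
 *    cache ID and auxiliary control register
 *  - invalidate all ways and re-enable the controller
 *  - hook up the outer_cache callbacks (locked range variants for
 *    L220/L310, the atomic variants otherwise)
 *
 * A platform typically calls this from its early init code; the values
 * below are illustrative only (aux_val = 0 with an all-ones aux_mask
 * keeps the hardware defaults), and l2cc_base stands for the platform's
 * own ioremapped controller base:
 *
 *	l2x0_init(l2cc_base, 0x00000000, 0xffffffff);
 */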
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux, bits;
	__u32 way_size = 0;
	const char *type;

	l2x0_base = base;
	l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	bits = readl_relaxed(l2x0_base + L2X0_CTRL);
	bits &= ~0x01;	/* clear bit 0 */
	writel_relaxed(bits, l2x0_base + L2X0_CTRL);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			l2x0_ways = 16;
		else
			l2x0_ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		l2x0_ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		l2x0_ways = 8;
		type = "L2x0 series";
		break;
	}
	writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
	l2x0_way_mask = (1 << l2x0_ways) - 1;

	/*
	 * L2 cache Size = Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = SZ_1K << (way_size + 3);
	l2x0_size = l2x0_ways * way_size;
	l2x0_sets = way_size / CACHE_LINE_SIZE;

	l2x0_inv_all();

	/* enable L2X0 */
	bits = readl_relaxed(l2x0_base + L2X0_CTRL);
	bits |= 0x01;	/* set bit 0 */
	writel_relaxed(bits, l2x0_base + L2X0_CTRL);

	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L220:
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		printk(KERN_INFO "L220 cache controller enabled\n");
		break;
	case L2X0_CACHE_ID_PART_L310:
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		printk(KERN_INFO "L310 cache controller enabled\n");
		break;
	case L2X0_CACHE_ID_PART_L210:
	default:
		outer_cache.inv_range = l2x0_inv_range_atomic;
		outer_cache.clean_range = l2x0_clean_range_atomic;
		outer_cache.flush_range = l2x0_flush_range_atomic;
		printk(KERN_INFO "L210 cache controller enabled\n");
		break;
	}

	outer_cache.sync = l2x0_cache_sync;

	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	mb();
	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			l2x0_ways, l2x0_cache_id, aux, l2x0_size);
}

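/*
 * Power-collapse support: l2x0_suspend() saves the auxiliary and data
 * latency control registers, flushes the cache and disables it;
 * l2x0_resume(collapsed != 0) restores the saved registers and
 * invalidates the cache before re-enabling it.
 */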
void l2x0_suspend(void)
{
	/* Save aux control register value */
	aux_ctrl_save = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	data_latency_ctrl = readl_relaxed(l2x0_base + L2X0_DATA_LATENCY_CTRL);
	/* Flush all cache */
	l2x0_flush_all();
	/* Disable the cache */
	writel_relaxed(0, l2x0_base + L2X0_CTRL);

	/* Memory barrier */
	dmb();
}

void l2x0_resume(int collapsed)
{
	if (collapsed) {
		/* Disable the cache */
		writel_relaxed(0, l2x0_base + L2X0_CTRL);

		/* Restore aux control register value */
		writel_relaxed(aux_ctrl_save, l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(data_latency_ctrl, l2x0_base +
				L2X0_DATA_LATENCY_CTRL);

		/* Invalidate the cache */
		l2x0_inv_all();
		/*
		 * TBD: make sure that l2x0_inv_all() has finished before
		 * actually enabling the cache.  Logically this should not
		 * be required, as the cache sync is an atomic operation,
		 * but on 8x25 random crashes were observed which go away
		 * if we add a dmb or keep the L2 disabled.  Keep this as a
		 * temporary workaround until the root cause is found.
		 */
		dmb();
	}

	/* Enable the cache */
	writel_relaxed(1, l2x0_base + L2X0_CTRL);

	mb();
}

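/*
 * Device tree support.  l2x0_of_init() locates a matching cache
 * controller node, maps it, applies the latency/filter properties below
 * (only possible while the cache is still disabled) and then calls
 * l2x0_init().
 *
 * A hypothetical node consumed by this code might look like the
 * following (the unit address and latency values are illustrative only):
 *
 *	L2: cache-controller@fff12000 {
 *		compatible = "arm,pl310-cache";
 *		reg = <0xfff12000 0x1000>;
 *		arm,tag-latency = <1 1 1>;
 *		arm,data-latency = <2 2 1>;
 *	};
 */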
#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  __u32 *aux_val, __u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = pl310_of_setup },
	{ .compatible = "arm,l220-cache", .data = l2x0_of_setup },
	{ .compatible = "arm,l210-cache", .data = l2x0_of_setup },
	{}
};

int __init l2x0_of_init(__u32 aux_val, __u32 aux_mask)
{
	struct device_node *np;
	void (*l2_setup)(const struct device_node *np,
			 __u32 *aux_val, __u32 *aux_mask);

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;
	l2x0_base = of_iomap(np, 0);
	if (!l2x0_base)
		return -ENOMEM;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		l2_setup = of_match_node(l2x0_ids, np)->data;
		if (l2_setup)
			l2_setup(np, &aux_val, &aux_mask);
	}
	l2x0_init(l2x0_base, aux_val, aux_mask);
	return 0;
}
#endif