blob: 7aacb84ac34c178285a257bb14ce8e0e6ef0e181 [file] [log] [blame]
Catalin Marinas382266a2007-02-05 14:48:19 +01001/*
2 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
3 *
4 * Copyright (C) 2007 ARM Limited
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 * Copyright (c) 2009, 2011, Code Aurora Forum. All rights reserved.
Catalin Marinas382266a2007-02-05 14:48:19 +01006 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License version 2 as
9 * published by the Free Software Foundation.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20#include <linux/init.h>
Catalin Marinas07620972007-07-20 11:42:40 +010021#include <linux/spinlock.h>
Russell Kingfced80c2008-09-06 12:10:45 +010022#include <linux/io.h>
Catalin Marinas382266a2007-02-05 14:48:19 +010023
24#include <asm/cacheflush.h>
Catalin Marinas382266a2007-02-05 14:48:19 +010025#include <asm/hardware/cache-l2x0.h>
26
/* All L2x0 controllers operate on 32-byte cache lines. */
#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;			/* virtual base of the L2 controller registers */
static uint32_t aux_ctrl_save;			/* AUX_CTRL value saved by l2x0_suspend() */
static uint32_t data_latency_ctrl;		/* DATA_LATENCY_CTRL saved by l2x0_suspend() */
static DEFINE_SPINLOCK(l2x0_lock);		/* serialises background (non-atomic) maintenance ops */
static uint32_t l2x0_way_mask;	/* Bitmask of active ways */
static uint32_t l2x0_size;			/* total cache size in bytes (ways * way size) */
static u32 l2x0_cache_id;			/* raw CACHE_ID register, latched in l2x0_init() */
static unsigned int l2x0_sets;			/* sets per way (way size / line size) */
static unsigned int l2x0_ways;			/* number of ways detected in l2x0_init() */
38
39static inline bool is_pl310_rev(int rev)
40{
41 return (l2x0_cache_id &
42 (L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
43 (L2X0_CACHE_ID_PART_L310 | rev);
44}
Catalin Marinas382266a2007-02-05 14:48:19 +010045
Catalin Marinas9a6655e2010-08-31 13:05:22 +010046static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
Catalin Marinas382266a2007-02-05 14:48:19 +010047{
Catalin Marinas9a6655e2010-08-31 13:05:22 +010048 /* wait for cache operation by line or way to complete */
Catalin Marinas6775a552010-07-28 22:01:25 +010049 while (readl_relaxed(reg) & mask)
Catalin Marinas382266a2007-02-05 14:48:19 +010050 ;
Catalin Marinas382266a2007-02-05 14:48:19 +010051}
52
#ifdef CONFIG_CACHE_PL310
/*
 * On PL310, maintenance operations by line complete before the register
 * write returns, so no polling is required: this compiles to nothing.
 */
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
/* L210/L220 must poll the operation register like way-based ops. */
#define cache_wait	cache_wait_way
#endif
61
/*
 * Drain the controller's buffers so all previously issued maintenance
 * operations are complete and visible.
 */
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

#ifdef CONFIG_ARM_ERRATA_753970
	/*
	 * Errata workaround: write to an unmapped register instead of
	 * CACHE_SYNC (a write to a dummy offset still drains the buffers).
	 */
	writel_relaxed(0, base + L2X0_DUMMY_REG);
#else
	writel_relaxed(0, base + L2X0_CACHE_SYNC);
#endif
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}
74
Santosh Shilimkar424d6b12010-02-04 19:35:06 +010075static inline void l2x0_clean_line(unsigned long addr)
76{
77 void __iomem *base = l2x0_base;
78 cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +010079 writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
Santosh Shilimkar424d6b12010-02-04 19:35:06 +010080}
81
82static inline void l2x0_inv_line(unsigned long addr)
83{
84 void __iomem *base = l2x0_base;
85 cache_wait(base + L2X0_INV_LINE_PA, 1);
Catalin Marinas6775a552010-07-28 22:01:25 +010086 writel_relaxed(addr, base + L2X0_INV_LINE_PA);
Santosh Shilimkar424d6b12010-02-04 19:35:06 +010087}
88
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)

/*
 * The errata workarounds toggle the PL310 debug control register around
 * affected maintenance operations; route through the outer_cache hook so
 * platforms can override how the debug register is reached.
 */
#define debug_writel(val)	outer_cache.set_debug(val)

static void l2x0_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

/* No debug hook is installed when no errata workaround is built in. */
#define l2x0_set_debug	NULL
#endif
105
#ifdef CONFIG_PL310_ERRATA_588369
/*
 * Errata 588369 workaround: the combined clean+invalidate-by-PA operation
 * is unreliable, so perform the two steps separately.
 */
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

/* Non-errata path: single combined clean+invalidate by physical address. */
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif
Santosh Shilimkar424d6b12010-02-04 19:35:06 +0100126
/*
 * outer_cache.sync hook: drain the controller's buffers.
 * Thin public wrapper around cache_sync(); no locking is taken here.
 */
void l2x0_cache_sync(void)
{
	cache_sync();
}
131
#ifdef CONFIG_PL310_ERRATA_727915
/*
 * Errata 727915 workaround: instead of a single background clean/flush
 * by way, walk every (way, set) pair and issue the operation by index
 * through @reg (an INV/CLEAN/CLEAN_INV ..._LINE_IDX register).
 *
 * The index encoding used is (way << 28) | (set << 5) — set << 5 matches
 * the 32-byte line size; way in the top nibble fits the L310 maximum of
 * 16 ways. NOTE(review): field positions assumed from the PL310 register
 * layout — confirm against the TRM for other configurations.
 *
 * The lock is taken per way (not for the whole walk) so interrupts are
 * not held off for the duration of a full-cache operation.
 */
static void l2x0_for_each_set_way(void __iomem *reg)
{
	int set;
	int way;
	unsigned long flags;

	for (way = 0; way < l2x0_ways; way++) {
		spin_lock_irqsave(&l2x0_lock, flags);
		for (set = 0; set < l2x0_sets; set++)
			writel_relaxed((way << 28) | (set << 5), reg);
		cache_sync();
		spin_unlock_irqrestore(&l2x0_lock, flags);
	}
}
#endif
148
/*
 * Clean+invalidate every active way. Caller must hold l2x0_lock
 * (see l2x0_flush_all()/l2x0_disable()). The debug_writel(0x03)/(0x00)
 * bracket is the errata workaround toggle; it compiles out when no
 * errata config is enabled.
 */
static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}
157
/*
 * outer_cache.flush_all hook: clean+invalidate the whole cache.
 * On PL310 r2p0 with errata 727915 enabled, the background
 * clean+invalidate-by-way operation is avoided in favour of a
 * per-set/way walk (which does its own locking).
 */
static void l2x0_flush_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
174
/*
 * Clean (write back, without invalidating) the whole cache.
 * Same errata-727915 structure as l2x0_flush_all(): r2p0 uses a
 * per-set/way walk, otherwise a locked background clean-by-way with
 * the debug-register toggle around it.
 */
static void l2x0_clean_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	spin_lock_irqsave(&l2x0_lock, flags);
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
195
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530196static void l2x0_inv_all(void)
Catalin Marinas382266a2007-02-05 14:48:19 +0100197{
Russell King0eb948d2009-11-19 11:12:15 +0000198 unsigned long flags;
199
Catalin Marinas382266a2007-02-05 14:48:19 +0100200 /* invalidate all ways */
Russell King0eb948d2009-11-19 11:12:15 +0000201 spin_lock_irqsave(&l2x0_lock, flags);
Thomas Gleixner2fd86582010-07-31 21:05:24 +0530202 /* Invalidating when L2 is enabled is a nono */
203 BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
Catalin Marinas6775a552010-07-28 22:01:25 +0100204 writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
Catalin Marinas9a6655e2010-08-31 13:05:22 +0100205 cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
Catalin Marinas382266a2007-02-05 14:48:19 +0100206 cache_sync();
Russell King0eb948d2009-11-19 11:12:15 +0000207 spin_unlock_irqrestore(&l2x0_lock, flags);
Catalin Marinas382266a2007-02-05 14:48:19 +0100208}
209
/*
 * Invalidate [start, end) by physical address.
 *
 * Partial lines at either boundary are flushed (clean+invalidate) rather
 * than invalidated, so data outside the requested range that shares a
 * line is not silently discarded. The body is processed in 4 KiB chunks,
 * dropping and retaking the lock between chunks to bound IRQ-off latency.
 */
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		/* errata debug-register bracket around the flush */
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		/* cap each locked burst at 4 KiB worth of lines */
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			/* breathe: let pending interrupts in between chunks */
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
248
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700249static void l2x0_inv_range_atomic(unsigned long start, unsigned long end)
250{
251 unsigned long addr;
252
253 if (start & (CACHE_LINE_SIZE - 1)) {
254 start &= ~(CACHE_LINE_SIZE - 1);
255 writel_relaxed(start, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
256 start += CACHE_LINE_SIZE;
257 }
258
259 if (end & (CACHE_LINE_SIZE - 1)) {
260 end &= ~(CACHE_LINE_SIZE - 1);
261 writel_relaxed(end, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
262 }
263
264 for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
265 writel_relaxed(addr, l2x0_base + L2X0_INV_LINE_PA);
266
267 mb();
268}
269
/*
 * Clean (write back) [start, end) by physical address.
 *
 * Ranges covering the whole cache fall back to l2x0_clean_all(), which
 * is cheaper than a line-by-line walk. Otherwise the range is processed
 * in 4 KiB chunks with the lock dropped between chunks to bound IRQ-off
 * time. Cleaning a partial boundary line is harmless, so no special
 * edge handling is needed (unlike invalidate).
 */
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
299
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700300static void l2x0_clean_range_atomic(unsigned long start, unsigned long end)
301{
302 unsigned long addr;
303
304 start &= ~(CACHE_LINE_SIZE - 1);
305 for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
306 writel_relaxed(addr, l2x0_base + L2X0_CLEAN_LINE_PA);
307
308 mb();
309}
310
/*
 * Clean+invalidate [start, end) by physical address.
 *
 * Whole-cache-sized ranges fall back to l2x0_flush_all(). Otherwise the
 * range is walked in 4 KiB chunks under the lock, with the errata
 * debug-register bracket held around each chunk and the lock dropped
 * between chunks to bound IRQ-off latency.
 */
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			spin_unlock_irqrestore(&l2x0_lock, flags);
			spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
342
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700343void l2x0_flush_range_atomic(unsigned long start, unsigned long end)
344{
345 unsigned long addr;
346
347 start &= ~(CACHE_LINE_SIZE - 1);
348 for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
349 writel_relaxed(addr, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
350
351 mb();
352}
353
/*
 * outer_cache.disable hook: flush everything, then clear the enable bit
 * in the control register. Done under the lock so no other maintenance
 * op can interleave; dsb() ensures the disable write has completed
 * before the lock is released.
 */
static void l2x0_disable(void)
{
	unsigned long flags;

	spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	spin_unlock_irqrestore(&l2x0_lock, flags);
}
364
/*
 * Probe and enable the L2 controller at virtual address @base.
 *
 * @aux_val/@aux_mask patch the AUX_CTRL register: the current value is
 * ANDed with @aux_mask, then ORed with @aux_val. The sequence is
 * order-critical: disable the cache, program AUX_CTRL, invalidate all
 * ways, then enable — AUX_CTRL must not be written while enabled, and
 * invalidation is only legal while disabled (see l2x0_inv_all()).
 */
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
	__u32 aux, bits;
	__u32 way_size = 0;
	const char *type;

	l2x0_base = base;
	l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);

	/* Make sure the cache is disabled before touching AUX_CTRL. */
	bits = readl_relaxed(l2x0_base + L2X0_CTRL);
	bits &= ~0x01;	/* clear bit 0 */
	writel_relaxed(bits, l2x0_base + L2X0_CTRL);

	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		/* AUX_CTRL bit 16 selects 16-way associativity on L310 */
		if (aux & (1 << 16))
			l2x0_ways = 16;
		else
			l2x0_ways = 8;
		type = "L310";
		break;
	case L2X0_CACHE_ID_PART_L210:
		/* L210 encodes the way count directly in AUX_CTRL[16:13] */
		l2x0_ways = (aux >> 13) & 0xf;
		type = "L210";
		break;
	default:
		/* Assume unknown chips have 8 ways */
		l2x0_ways = 8;
		type = "L2x0 series";
		break;
	}
	writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);
	l2x0_way_mask = (1 << l2x0_ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 * (way_size field decodes as 8 KiB << field value)
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = SZ_1K << (way_size + 3);
	l2x0_size = l2x0_ways * way_size;
	l2x0_sets = way_size / CACHE_LINE_SIZE;

	/* Contents are unknown at boot: invalidate while still disabled. */
	l2x0_inv_all();

	/* enable L2X0 */
	bits = readl_relaxed(l2x0_base + L2X0_CTRL);
	bits |= 0x01;	/* set bit 0 */
	writel_relaxed(bits, l2x0_base + L2X0_CTRL);

	/*
	 * L220/L310 get the locked range ops; L210 (and unknown parts) get
	 * the lock-free "atomic" variants.
	 */
	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L220:
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		printk(KERN_INFO "L220 cache controller enabled\n");
		break;
	case L2X0_CACHE_ID_PART_L310:
		outer_cache.inv_range = l2x0_inv_range;
		outer_cache.clean_range = l2x0_clean_range;
		outer_cache.flush_range = l2x0_flush_range;
		printk(KERN_INFO "L310 cache controller enabled\n");
		break;
	case L2X0_CACHE_ID_PART_L210:
	default:
		outer_cache.inv_range = l2x0_inv_range_atomic;
		outer_cache.clean_range = l2x0_clean_range_atomic;
		outer_cache.flush_range = l2x0_flush_range_atomic;
		printk(KERN_INFO "L210 cache controller enabled\n");
		break;
	}

	outer_cache.sync = l2x0_cache_sync;

	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;
	outer_cache.set_debug = l2x0_set_debug;

	mb();
	/*
	 * NOTE(review): "... cache controller enabled" is printed twice —
	 * once in the switch above and once here via %s. Likely merge
	 * residue; consider dropping one (kept as-is to preserve output).
	 */
	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			l2x0_ways, l2x0_cache_id, aux, l2x0_size);
}
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700454
/*
 * Prepare the L2 for a power collapse: save the registers that
 * l2x0_resume() restores (AUX_CTRL and DATA_LATENCY_CTRL), write back
 * all dirty data, then disable the cache.
 */
void l2x0_suspend(void)
{
	/* Save aux control register value */
	aux_ctrl_save = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	data_latency_ctrl = readl_relaxed(l2x0_base + L2X0_DATA_LATENCY_CTRL);
	/* Flush all cache */
	l2x0_flush_all();
	/* Disable the cache */
	writel_relaxed(0, l2x0_base + L2X0_CTRL);

	/* Memory barrier */
	dmb();
}
468
/*
 * Re-enable the L2 after suspend.
 *
 * @collapsed: non-zero when the controller lost state (power collapse);
 * in that case the saved AUX_CTRL/DATA_LATENCY_CTRL values are restored
 * and the (now garbage) contents invalidated — both require the cache
 * to be disabled first. When state was retained, only the enable bit
 * needs setting.
 */
void l2x0_resume(int collapsed)
{
	if (collapsed) {
		/* Disable the cache */
		writel_relaxed(0, l2x0_base + L2X0_CTRL);

		/* Restore aux control register value */
		writel_relaxed(aux_ctrl_save, l2x0_base + L2X0_AUX_CTRL);
		writel_relaxed(data_latency_ctrl, l2x0_base +
				L2X0_DATA_LATENCY_CTRL);

		/* Invalidate the cache */
		l2x0_inv_all();
	}

	/* Enable the cache */
	writel_relaxed(1, l2x0_base + L2X0_CTRL);

	mb();
}