/*
 * arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
 *
 * Copyright (C) 2007 ARM Limited
 * Copyright (c) 2009, 2011-2012, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/platform_device.h>

#include <asm/cacheflush.h>
#include <asm/hardware/cache-l2x0.h>
#include <asm/irq_regs.h>
#include <asm/pmu.h>

#define CACHE_LINE_SIZE		32

static void __iomem *l2x0_base;
static DEFINE_RAW_SPINLOCK(l2x0_lock);
static u32 l2x0_way_mask;	/* Bitmask of active ways */
static u32 l2x0_size;
static u32 l2x0_cache_id;
static unsigned int l2x0_sets;
static unsigned int l2x0_ways;
static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
static void pl310_save(void);
static void pl310_resume(void);
static void l2x0_resume(void);

static inline bool is_pl310_rev(int rev)
{
	return (l2x0_cache_id &
		(L2X0_CACHE_ID_PART_MASK | L2X0_CACHE_ID_REV_MASK)) ==
			(L2X0_CACHE_ID_PART_L310 | rev);
}

struct l2x0_regs l2x0_saved_regs;

struct l2x0_of_data {
	void (*setup)(const struct device_node *, u32 *, u32 *);
	void (*save)(void);
	void (*resume)(void);
};

static inline void cache_wait_way(void __iomem *reg, unsigned long mask)
{
	/* wait for cache operation by line or way to complete */
	while (readl_relaxed(reg) & mask)
		cpu_relax();
}

#ifdef CONFIG_CACHE_PL310
static inline void cache_wait(void __iomem *reg, unsigned long mask)
{
	/* cache operations by line are atomic on PL310 */
}
#else
#define cache_wait	cache_wait_way
#endif

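/*
 * Writing to the cache sync register forces the controller to drain its
 * buffers so that previously issued maintenance operations become visible
 * to all masters. On PL310 parts affected by errata 753970 the write goes
 * to an unmapped dummy register instead (sync_reg_offset is switched over
 * in l2x0_init()).
 */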
static inline void cache_sync(void)
{
	void __iomem *base = l2x0_base;

	writel_relaxed(0, base + sync_reg_offset);
	cache_wait(base + L2X0_CACHE_SYNC, 1);
}

static inline void l2x0_clean_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
}

static inline void l2x0_inv_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}

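/*
 * The errata workarounds (588369, 727915) bracket maintenance operations
 * with writes to the PL310 debug control register: 0x03 sets the DCL and
 * DWB bits (disabling cache linefill and write-back), 0x00 restores
 * normal behaviour. When neither erratum is configured, debug_writel()
 * compiles away to nothing.
 */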
#if defined(CONFIG_PL310_ERRATA_588369) || defined(CONFIG_PL310_ERRATA_727915)
static inline void debug_writel(unsigned long val)
{
	if (outer_cache.set_debug)
		outer_cache.set_debug(val);
}

static void pl310_set_debug(unsigned long val)
{
	writel_relaxed(val, l2x0_base + L2X0_DEBUG_CTRL);
}
#else
/* Optimised out for non-errata case */
static inline void debug_writel(unsigned long val)
{
}

#define pl310_set_debug	NULL
#endif

#ifdef CONFIG_PL310_ERRATA_588369
static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;

	/* Clean by PA followed by Invalidate by PA */
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_LINE_PA);
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_INV_LINE_PA);
}
#else

static inline void l2x0_flush_line(unsigned long addr)
{
	void __iomem *base = l2x0_base;
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	writel_relaxed(addr, base + L2X0_CLEAN_INV_LINE_PA);
}
#endif

void l2x0_cache_sync(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

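/*
 * PL310 errata 727915: the background clean/invalidate-by-way operations
 * are unsafe on r2p0, so affected parts instead walk every line using the
 * atomic clean/invalidate-by-index operations. The index registers take
 * the way number at bit 28 and the set index at bit 5 (the low five bits
 * being the offset within a 32-byte line).
 */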
#ifdef CONFIG_PL310_ERRATA_727915
static void l2x0_for_each_set_way(void __iomem *reg)
{
	int set;
	int way;
	unsigned long flags;

	for (way = 0; way < l2x0_ways; way++) {
		raw_spin_lock_irqsave(&l2x0_lock, flags);
		for (set = 0; set < l2x0_sets; set++)
			writel_relaxed((way << 28) | (set << 5), reg);
		cache_sync();
		raw_spin_unlock_irqrestore(&l2x0_lock, flags);
	}
}
#endif

static void __l2x0_flush_all(void)
{
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_INV_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
}

static void l2x0_flush_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_INV_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_all(void)
{
	unsigned long flags;

#ifdef CONFIG_PL310_ERRATA_727915
	if (is_pl310_rev(REV_PL310_R2P0)) {
		l2x0_for_each_set_way(l2x0_base + L2X0_CLEAN_LINE_IDX);
		return;
	}
#endif

	/* clean all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	debug_writel(0x03);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_CLEAN_WAY);
	cache_wait_way(l2x0_base + L2X0_CLEAN_WAY, l2x0_way_mask);
	cache_sync();
	debug_writel(0x00);
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_inv_all(void)
{
	unsigned long flags;

	/* invalidate all ways */
	raw_spin_lock_irqsave(&l2x0_lock, flags);
	/* Invalidating when L2 is enabled is a nono */
	BUG_ON(readl(l2x0_base + L2X0_CTRL) & 1);
	writel_relaxed(l2x0_way_mask, l2x0_base + L2X0_INV_WAY);
	cache_wait_way(l2x0_base + L2X0_INV_WAY, l2x0_way_mask);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

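/*
 * The range operations below process at most 4096 bytes per lock hold:
 * the spinlock is dropped and re-taken between blocks so that interrupt
 * latency stays bounded while large ranges are maintained. In addition,
 * l2x0_clean_range() and l2x0_flush_range() fall back to the whole-cache
 * operations when the range is at least as large as the cache itself.
 */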
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	if (start & (CACHE_LINE_SIZE - 1)) {
		start &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(start);
		debug_writel(0x00);
		start += CACHE_LINE_SIZE;
	}

	if (end & (CACHE_LINE_SIZE - 1)) {
		end &= ~(CACHE_LINE_SIZE - 1);
		debug_writel(0x03);
		l2x0_flush_line(end);
		debug_writel(0x00);
	}

	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_inv_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_clean_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_clean_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		while (start < blk_end) {
			l2x0_clean_line(start);
			start += CACHE_LINE_SIZE;
		}

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_flush_range(unsigned long start, unsigned long end)
{
	void __iomem *base = l2x0_base;
	unsigned long flags;

	if ((end - start) >= l2x0_size) {
		l2x0_flush_all();
		return;
	}

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	start &= ~(CACHE_LINE_SIZE - 1);
	while (start < end) {
		unsigned long blk_end = start + min(end - start, 4096UL);

		debug_writel(0x03);
		while (start < blk_end) {
			l2x0_flush_line(start);
			start += CACHE_LINE_SIZE;
		}
		debug_writel(0x00);

		if (blk_end < end) {
			raw_spin_unlock_irqrestore(&l2x0_lock, flags);
			raw_spin_lock_irqsave(&l2x0_lock, flags);
		}
	}
	cache_wait(base + L2X0_CLEAN_INV_LINE_PA, 1);
	cache_sync();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

static void l2x0_disable(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0_lock, flags);
	__l2x0_flush_all();
	writel_relaxed(0, l2x0_base + L2X0_CTRL);
	dsb();
	raw_spin_unlock_irqrestore(&l2x0_lock, flags);
}

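/*
 * Clear the lockdown registers so that no ways are locked when the cache
 * is enabled: the PL310 has eight D/I lockdown register pairs (one per
 * master), while the L210/L220 and unknown parts have a single pair.
 */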
static void l2x0_unlock(u32 cache_id)
{
	int lockregs;
	int i;

	if (cache_id == L2X0_CACHE_ID_PART_L310)
		lockregs = 8;
	else
		/* L210 and unknown types */
		lockregs = 1;

	for (i = 0; i < lockregs; i++) {
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_D_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
		writel_relaxed(0x0, l2x0_base + L2X0_LOCKDOWN_WAY_I_BASE +
			       i * L2X0_LOCKDOWN_STRIDE);
	}
}

void __init l2x0_init(void __iomem *base, u32 aux_val, u32 aux_mask)
{
	u32 aux;
	u32 way_size = 0;
	const char *type;

	l2x0_base = base;

	l2x0_cache_id = readl_relaxed(l2x0_base + L2X0_CACHE_ID);
	aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);

	aux &= aux_mask;
	aux |= aux_val;

	/* Determine the number of ways */
	switch (l2x0_cache_id & L2X0_CACHE_ID_PART_MASK) {
	case L2X0_CACHE_ID_PART_L310:
		if (aux & (1 << 16))
			l2x0_ways = 16;
		else
			l2x0_ways = 8;
		type = "L310";
#ifdef CONFIG_PL310_ERRATA_753970
		/* Unmapped register. */
		sync_reg_offset = L2X0_DUMMY_REG;
#endif
		outer_cache.set_debug = pl310_set_debug;
		outer_cache.resume = pl310_resume;
		break;
	case L2X0_CACHE_ID_PART_L210:
		l2x0_ways = (aux >> 13) & 0xf;
		type = "L210";
		outer_cache.resume = l2x0_resume;
		break;
	default:
		/* Assume unknown chips have 8 ways */
		l2x0_ways = 8;
		type = "L2x0 series";
		outer_cache.resume = l2x0_resume;
		break;
	}

	l2x0_way_mask = (1 << l2x0_ways) - 1;

	/*
	 * L2 cache Size =  Way size * Number of ways
	 */
	way_size = (aux & L2X0_AUX_CTRL_WAY_SIZE_MASK) >> 17;
	way_size = SZ_1K << (way_size + 3);
	l2x0_size = l2x0_ways * way_size;
	l2x0_sets = way_size / CACHE_LINE_SIZE;

	/*
	 * Check if l2x0 controller is already enabled.
	 * If you are booting from non-secure mode,
	 * accessing the below registers will fault.
	 */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* Make sure that I&D is not locked down when starting */
		l2x0_unlock(l2x0_cache_id);

		/* l2x0 controller is disabled */
		writel_relaxed(aux, l2x0_base + L2X0_AUX_CTRL);

		l2x0_saved_regs.aux_ctrl = aux;

		l2x0_inv_all();

		/* enable L2X0 */
		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}

	outer_cache.inv_range = l2x0_inv_range;
	outer_cache.clean_range = l2x0_clean_range;
	outer_cache.flush_range = l2x0_flush_range;
	outer_cache.sync = l2x0_cache_sync;
	outer_cache.flush_all = l2x0_flush_all;
	outer_cache.inv_all = l2x0_inv_all;
	outer_cache.disable = l2x0_disable;

	printk(KERN_INFO "%s cache controller enabled\n", type);
	printk(KERN_INFO "l2x0: %d ways, CACHE_ID 0x%08x, AUX_CTRL 0x%08x, Cache size: %d B\n",
			l2x0_ways, l2x0_cache_id, aux, l2x0_size);

	/* Save the L2X0 contents, as they are not modified elsewhere */
	pl310_save();
}

#ifdef CONFIG_OF
static void __init l2x0_of_setup(const struct device_node *np,
				 u32 *aux_val, u32 *aux_mask)
{
	u32 data[2] = { 0, 0 };
	u32 tag = 0;
	u32 dirty = 0;
	u32 val = 0, mask = 0;

	of_property_read_u32(np, "arm,tag-latency", &tag);
	if (tag) {
		mask |= L2X0_AUX_CTRL_TAG_LATENCY_MASK;
		val |= (tag - 1) << L2X0_AUX_CTRL_TAG_LATENCY_SHIFT;
	}

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1]) {
		mask |= L2X0_AUX_CTRL_DATA_RD_LATENCY_MASK |
			L2X0_AUX_CTRL_DATA_WR_LATENCY_MASK;
		val |= ((data[0] - 1) << L2X0_AUX_CTRL_DATA_RD_LATENCY_SHIFT) |
		       ((data[1] - 1) << L2X0_AUX_CTRL_DATA_WR_LATENCY_SHIFT);
	}

	of_property_read_u32(np, "arm,dirty-latency", &dirty);
	if (dirty) {
		mask |= L2X0_AUX_CTRL_DIRTY_LATENCY_MASK;
		val |= (dirty - 1) << L2X0_AUX_CTRL_DIRTY_LATENCY_SHIFT;
	}

	*aux_val &= ~mask;
	*aux_val |= val;
	*aux_mask &= ~mask;
}

static void __init pl310_of_setup(const struct device_node *np,
				  u32 *aux_val, u32 *aux_mask)
{
	u32 data[3] = { 0, 0, 0 };
	u32 tag[3] = { 0, 0, 0 };
	u32 filter[2] = { 0, 0 };

	of_property_read_u32_array(np, "arm,tag-latency", tag, ARRAY_SIZE(tag));
	if (tag[0] && tag[1] && tag[2])
		writel_relaxed(
			((tag[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((tag[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((tag[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_TAG_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,data-latency",
				   data, ARRAY_SIZE(data));
	if (data[0] && data[1] && data[2])
		writel_relaxed(
			((data[0] - 1) << L2X0_LATENCY_CTRL_RD_SHIFT) |
			((data[1] - 1) << L2X0_LATENCY_CTRL_WR_SHIFT) |
			((data[2] - 1) << L2X0_LATENCY_CTRL_SETUP_SHIFT),
			l2x0_base + L2X0_DATA_LATENCY_CTRL);

	of_property_read_u32_array(np, "arm,filter-ranges",
				   filter, ARRAY_SIZE(filter));
	if (filter[1]) {
		writel_relaxed(ALIGN(filter[0] + filter[1], SZ_1M),
			       l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed((filter[0] & ~(SZ_1M - 1)) | L2X0_ADDR_FILTER_EN,
			       l2x0_base + L2X0_ADDR_FILTER_START);
	}
}
#endif

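/*
 * Snapshot the PL310 configuration registers that are lost across a power
 * collapse so that pl310_resume() can reprogram them before re-enabling
 * the cache. The prefetch and power control registers only exist from
 * r2p0 and r3p0 respectively, so they are saved conditionally on the RTL
 * revision.
 */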
static void pl310_save(void)
{
	u32 l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
		L2X0_CACHE_ID_RTL_MASK;

	l2x0_saved_regs.tag_latency = readl_relaxed(l2x0_base +
		L2X0_TAG_LATENCY_CTRL);
	l2x0_saved_regs.data_latency = readl_relaxed(l2x0_base +
		L2X0_DATA_LATENCY_CTRL);
	l2x0_saved_regs.filter_end = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_END);
	l2x0_saved_regs.filter_start = readl_relaxed(l2x0_base +
		L2X0_ADDR_FILTER_START);

	if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
		/*
		 * From r2p0, there is Prefetch offset/control register
		 */
		l2x0_saved_regs.prefetch_ctrl = readl_relaxed(l2x0_base +
			L2X0_PREFETCH_CTRL);
		/*
		 * From r3p0, there is Power control register
		 */
		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
			l2x0_saved_regs.pwr_ctrl = readl_relaxed(l2x0_base +
				L2X0_POWER_CTRL);
	}
}

static void l2x0_resume(void)
{
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore aux ctrl and enable l2 */
		l2x0_unlock(readl_relaxed(l2x0_base + L2X0_CACHE_ID));

		writel_relaxed(l2x0_saved_regs.aux_ctrl, l2x0_base +
			L2X0_AUX_CTRL);

		l2x0_inv_all();

		writel_relaxed(1, l2x0_base + L2X0_CTRL);
	}
}

static void pl310_resume(void)
{
	u32 l2x0_revision;

	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		/* restore pl310 setup */
		writel_relaxed(l2x0_saved_regs.tag_latency,
			l2x0_base + L2X0_TAG_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.data_latency,
			l2x0_base + L2X0_DATA_LATENCY_CTRL);
		writel_relaxed(l2x0_saved_regs.filter_end,
			l2x0_base + L2X0_ADDR_FILTER_END);
		writel_relaxed(l2x0_saved_regs.filter_start,
			l2x0_base + L2X0_ADDR_FILTER_START);

		l2x0_revision = readl_relaxed(l2x0_base + L2X0_CACHE_ID) &
			L2X0_CACHE_ID_RTL_MASK;

		if (l2x0_revision >= L2X0_CACHE_ID_RTL_R2P0) {
			writel_relaxed(l2x0_saved_regs.prefetch_ctrl,
				l2x0_base + L2X0_PREFETCH_CTRL);
			if (l2x0_revision >= L2X0_CACHE_ID_RTL_R3P0)
				writel_relaxed(l2x0_saved_regs.pwr_ctrl,
					l2x0_base + L2X0_POWER_CTRL);
		}
	}

	l2x0_resume();
}

#ifdef CONFIG_OF
static const struct l2x0_of_data pl310_data = {
	pl310_of_setup,
	pl310_save,
	pl310_resume,
};

static const struct l2x0_of_data l2x0_data = {
	l2x0_of_setup,
	NULL,
	l2x0_resume,
};

static const struct of_device_id l2x0_ids[] __initconst = {
	{ .compatible = "arm,pl310-cache", .data = (void *)&pl310_data },
	{ .compatible = "arm,l220-cache", .data = (void *)&l2x0_data },
	{ .compatible = "arm,l210-cache", .data = (void *)&l2x0_data },
	{}
};

int __init l2x0_of_init(u32 aux_val, u32 aux_mask)
{
	struct device_node *np;
	struct l2x0_of_data *data;
	struct resource res;

	np = of_find_matching_node(NULL, l2x0_ids);
	if (!np)
		return -ENODEV;

	if (of_address_to_resource(np, 0, &res))
		return -ENODEV;

	l2x0_base = ioremap(res.start, resource_size(&res));
	if (!l2x0_base)
		return -ENOMEM;

	l2x0_saved_regs.phy_base = res.start;

	data = of_match_node(l2x0_ids, np)->data;

	/* L2 configuration can only be changed if the cache is disabled */
	if (!(readl_relaxed(l2x0_base + L2X0_CTRL) & 1)) {
		if (data->setup)
			data->setup(np, &aux_val, &aux_mask);
	}

	if (data->save)
		data->save();

	l2x0_init(l2x0_base, aux_val, aux_mask);

	outer_cache.resume = data->resume;
	return 0;
}
#endif

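/*
 * Entry points intended for platform low-power code: flush and disable
 * the L2 before power collapse, then reprogram and re-enable it on the
 * way back up.
 */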
void l2cc_suspend(void)
{
	l2x0_disable();
	dmb();
}

void l2cc_resume(void)
{
	pl310_resume();
	dmb();
}

#ifdef CONFIG_HW_PERF_EVENTS
/*
 * L220/PL310 PMU-specific functionality.
 * TODO: Put this in a separate file and get the l2x0 driver to register
 * the PMU from l2x0_{of}_init.
 */

static struct arm_pmu l2x0_pmu;

static u64 l2x0pmu_max_event_id;

static struct perf_event *events[2];
static unsigned long used_mask[BITS_TO_LONGS(2)];
static struct pmu_hw_events hw_events = {
	.events = events,
	.used_mask = used_mask,
	.pmu_lock = __RAW_SPIN_LOCK_UNLOCKED(l2x0pmu_hw_events.pmu_lock),
};

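/*
 * Event counter 1's configuration and value registers sit 4 bytes below
 * counter 0's, so the per-index addresses are derived by subtracting
 * 4 * idx from the counter 0 offsets.
 */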
#define COUNTER_CFG_ADDR(idx)	(l2x0_base + L2X0_EVENT_CNT0_CFG - 4*idx)

#define COUNTER_CTRL_ADDR	(l2x0_base + L2X0_EVENT_CNT_CTRL)

#define COUNTER_ADDR(idx)	(l2x0_base + L2X0_EVENT_CNT0_VAL - 4*idx)

static u32 l2x0_read_intr_mask(void)
{
	return readl_relaxed(l2x0_base + L2X0_INTR_MASK);
}

static void l2x0_write_intr_mask(u32 val)
{
	writel_relaxed(val, l2x0_base + L2X0_INTR_MASK);
}

static void l2x0_enable_counter_interrupt(void)
{
	u32 intr_mask = l2x0_read_intr_mask();
	intr_mask |= L2X0_INTR_MASK_ECNTR;
	l2x0_write_intr_mask(intr_mask);
}

static void l2x0_disable_counter_interrupt(void)
{
	u32 intr_mask = l2x0_read_intr_mask();
	intr_mask &= ~L2X0_INTR_MASK_ECNTR;
	l2x0_write_intr_mask(intr_mask);
}

static void l2x0_clear_interrupts(u32 flags)
{
	writel_relaxed(flags, l2x0_base + L2X0_INTR_CLEAR);
}

static struct pmu_hw_events *l2x0pmu_get_hw_events(void)
{
	return &hw_events;
}

static u32 l2x0pmu_read_ctrl(void)
{
	return readl_relaxed(COUNTER_CTRL_ADDR);
}

static void l2x0pmu_write_ctrl(u32 val)
{
	writel_relaxed(val, COUNTER_CTRL_ADDR);
}

static u32 l2x0pmu_read_cfg(int idx)
{
	return readl_relaxed(COUNTER_CFG_ADDR(idx));
}

static void l2x0pmu_write_cfg(u32 val, int idx)
{
	writel_relaxed(val, COUNTER_CFG_ADDR(idx));
}

static void l2x0pmu_enable_counter(u32 cfg, int idx)
{
	cfg |= L2X0_EVENT_CNT_CFG_INTR_OVERFLOW;
	l2x0pmu_write_cfg(cfg, idx);
}

static u32 l2x0pmu_disable_counter(int idx)
{
	u32 cfg, oldcfg;

	cfg = oldcfg = l2x0pmu_read_cfg(idx);
	cfg &= ~L2X0_EVENT_CNT_CFG_MASK;
	cfg &= ~L2X0_EVENT_CNT_CFG_INTR_MASK;
	l2x0pmu_write_cfg(cfg, idx);

	return oldcfg;
}

static u32 l2x0pmu_read_counter(int idx)
{
	return readl_relaxed(COUNTER_ADDR(idx));
}

static void l2x0pmu_write_counter(int idx, u32 val)
{
	/*
	 * L2X0 counters can only be written to when they are disabled.
	 * As perf core does not disable counters before writing to them
	 * under interrupts, we must do so here.
	 */
	u32 cfg = l2x0pmu_disable_counter(idx);
	writel_relaxed(val, COUNTER_ADDR(idx));
	l2x0pmu_write_cfg(cfg, idx);
}

static int counter_is_saturated(int idx)
{
	return l2x0pmu_read_counter(idx) == 0xFFFFFFFF;
}

static void l2x0pmu_start(void)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&hw_events.pmu_lock, flags);

	l2x0_enable_counter_interrupt();

	val = l2x0pmu_read_ctrl();
	val |= L2X0_EVENT_CNT_ENABLE;
	l2x0pmu_write_ctrl(val);

	raw_spin_unlock_irqrestore(&hw_events.pmu_lock, flags);
}

static void l2x0pmu_stop(void)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&hw_events.pmu_lock, flags);

	val = l2x0pmu_read_ctrl();
	val &= ~L2X0_EVENT_CNT_ENABLE_MASK;
	l2x0pmu_write_ctrl(val);

	l2x0_disable_counter_interrupt();

	raw_spin_unlock_irqrestore(&hw_events.pmu_lock, flags);
}

static void l2x0pmu_enable(struct hw_perf_event *event, int idx, int cpu)
{
	unsigned long flags;
	u32 cfg;

	raw_spin_lock_irqsave(&hw_events.pmu_lock, flags);

	cfg = (event->config_base << L2X0_EVENT_CNT_CFG_SHIFT) &
		L2X0_EVENT_CNT_CFG_MASK;
	l2x0pmu_enable_counter(cfg, idx);

	raw_spin_unlock_irqrestore(&hw_events.pmu_lock, flags);
}

static void l2x0pmu_disable(struct hw_perf_event *event, int idx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&hw_events.pmu_lock, flags);
	l2x0pmu_disable_counter(idx);
	raw_spin_unlock_irqrestore(&hw_events.pmu_lock, flags);
}

static int l2x0pmu_get_event_idx(struct pmu_hw_events *events,
				 struct hw_perf_event *hwc)
{
	int idx;

	/* Counters are identical. Just grab a free one. */
	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		if (!test_and_set_bit(idx, hw_events.used_mask))
			return idx;
	}

	return -EAGAIN;
}

/*
 * As System PMUs are affine to CPU0, the fact that interrupts are disabled
 * during interrupt handling is enough to serialise our actions and make this
 * safe. We do not need to grab our pmu_lock here.
 */
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	regs = get_irq_regs();

	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		data.period = event->hw.last_period;

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	irq_work_run();

	return status;
}

static int map_l2x0_raw_event(u64 config)
{
	return (config <= l2x0pmu_max_event_id) ? config : -ENOENT;
}

static int l2x0pmu_map_event(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 supported_samples = (PERF_SAMPLE_TIME |
			PERF_SAMPLE_ID |
			PERF_SAMPLE_PERIOD |
			PERF_SAMPLE_STREAM_ID |
			PERF_SAMPLE_RAW);

	if (event->attr.type != l2x0_pmu.pmu.type)
		return -ENOENT;

	/*
	 * L2x0 counters are global across CPUs.
	 * If userspace asks perf to monitor from multiple CPUs, each CPU will
	 * report the shared total. When summed, this will be the actual value
	 * multiplied by the number of CPUs. We limit monitoring to a single
	 * CPU (0) to prevent confusion stemming from this.
	 */
	if (event->cpu != 0)
		return -ENOENT;

	if (event->attr.sample_type & ~supported_samples)
		return -ENOENT;

	return map_l2x0_raw_event(config);
}

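/*
 * The L220/PL310 provide two identical 32-bit event counters. They
 * saturate at 0xFFFFFFFF rather than wrapping, which is why the interrupt
 * handler above re-arms them manually (hence num_events = 2 and
 * max_period = 0xFFFFFFFF below).
 */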
static struct arm_pmu l2x0_pmu = {
	.id		= ARM_PERF_PMU_ID_L2X0,
	.type		= ARM_PMU_DEVICE_L2CC,
	.name		= "ARM L220/PL310 L2 Cache controller",
	.start		= l2x0pmu_start,
	.stop		= l2x0pmu_stop,
	.handle_irq	= l2x0pmu_handle_irq,
	.enable		= l2x0pmu_enable,
	.disable	= l2x0pmu_disable,
	.get_event_idx	= l2x0pmu_get_event_idx,
	.read_counter	= l2x0pmu_read_counter,
	.write_counter	= l2x0pmu_write_counter,
	.map_event	= l2x0pmu_map_event,
	.num_events	= 2,
	.max_period	= 0xFFFFFFFF,
	.get_hw_events	= l2x0pmu_get_hw_events,
};

static int __devinit l2x0pmu_device_probe(struct platform_device *pdev)
{
	l2x0_pmu.plat_device = pdev;
	/* FIXME: return code? */
	armpmu_register(&l2x0_pmu, "l2x0", -1);
	return 0;
}

static struct platform_driver l2x0pmu_driver = {
	.driver = {
		.name = "l2x0-pmu",
	},
	.probe = l2x0pmu_device_probe,
};

static int __init register_pmu_driver(void)
{
	return platform_driver_register(&l2x0pmu_driver);
}
device_initcall(register_pmu_driver);

#endif /* CONFIG_HW_PERF_EVENTS */