/*
 * Copyright (C) 2007 ARM Limited
 * Copyright (c) 2012, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/irq.h>
#include <linux/io.h>
#include <linux/err.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/platform_device.h>

#include <asm/pmu.h>
#include <asm/hardware/cache-l2x0.h>
#include <mach/socinfo.h>

/*
 * Nonzero on rev1 controllers (e.g. MSM9625), which do not use the
 * event counter overflow interrupt.
 */
static u32 rev1;

/*
 * Dynamically allocated PMU type, stored after registration so this
 * PMU can be uniquely identified at runtime.
 */
static u32 pmu_type;

/* This controller only supports 16 events. */
PMU_FORMAT_ATTR(l2_config, "config:0-4");

static struct attribute *arm_l2_ev_formats[] = {
	&format_attr_l2_config.attr,
	NULL,
};

/*
 * The format group is essential for accessing the PMU from userspace;
 * perf locates the group in sysfs via its .name field.
 */
static struct attribute_group arm_l2_pmu_format_group = {
	.name = "format",
	.attrs = arm_l2_ev_formats,
};

static const struct attribute_group *arm_l2_pmu_attr_grps[] = {
	&arm_l2_pmu_format_group,
	NULL,
};
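
/*
 * Illustrative usage (a sketch; assumes the standard perf tool syntax
 * for dynamically allocated PMU types): once registered, the encoding
 * above appears under
 * /sys/bus/event_source/devices/msm-l2/format/l2_config, so a raw L2
 * event can be counted with, e.g.:
 *
 *	perf stat -a -e msm-l2/l2_config=0x2/ sleep 1
 *
 * where 0x2 selects the data read hit event from the table below.
 */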

#define L2X0_AUX_CTRL_EVENT_MONITOR_SHIFT 20
#define L2X0_INTR_MASK_ECNTR 1

/* L220/PL310 event control register values */
#define L2X0_EVENT_CNT_ENABLE_MASK 1
#define L2X0_EVENT_CNT_ENABLE 1
#define L2X0_EVENT_CNT_RESET(x) (1 << ((x) + 1))

/* Event counter config values, shifted into place before use */
enum l2x0_perf_types {
	L2X0_EVENT_CNT_CFG_DISABLED = 0x0,
	L2X0_EVENT_CNT_CFG_CO = 0x1,
	L2X0_EVENT_CNT_CFG_DRHIT = 0x2,
	L2X0_EVENT_CNT_CFG_DRREQ = 0x3,
	L2X0_EVENT_CNT_CFG_DWHIT = 0x4,
	L2X0_EVENT_CNT_CFG_DWREQ = 0x5,
	L2X0_EVENT_CNT_CFG_DWTREQ = 0x6,
	L2X0_EVENT_CNT_CFG_IRHIT = 0x7,
	L2X0_EVENT_CNT_CFG_IRREQ = 0x8,
	L2X0_EVENT_CNT_CFG_WA = 0x9,

	/* PL310 only */
	L2X0_EVENT_CNT_CFG_IPFALLOC = 0xA,
	L2X0_EVENT_CNT_CFG_EPFHIT = 0xB,
	L2X0_EVENT_CNT_CFG_EPFALLOC = 0xC,
	L2X0_EVENT_CNT_CFG_SRRCVD = 0xD,
	L2X0_EVENT_CNT_CFG_SRCONF = 0xE,
	L2X0_EVENT_CNT_CFG_EPFRCVD = 0xF,
};

#define PL310_EVENT_CNT_CFG_MAX L2X0_EVENT_CNT_CFG_EPFRCVD

#define L2X0_EVENT_CNT_CFG_SHIFT 2
#define L2X0_EVENT_CNT_CFG_MASK (0xF << L2X0_EVENT_CNT_CFG_SHIFT)

#define L2X0_EVENT_CNT_CFG_INTR_MASK 0x3
#define L2X0_EVENT_CNT_CFG_INTR_DISABLED 0x0
#define L2X0_EVENT_CNT_CFG_INTR_INCREMENT 0x1
#define L2X0_EVENT_CNT_CFG_INTR_OVERFLOW 0x2

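/*
 * Worked example of the encoding above: to count data read hits, the
 * event source 0x2 (L2X0_EVENT_CNT_CFG_DRHIT) is shifted into the
 * config field and combined with the overflow interrupt mode:
 *
 *	(0x2 << L2X0_EVENT_CNT_CFG_SHIFT) | L2X0_EVENT_CNT_CFG_INTR_OVERFLOW
 *	= (0x2 << 2) | 0x2 = 0xa
 *
 * This is the value that l2x0pmu_enable() and l2x0pmu_enable_counter()
 * together program into the counter's config register.
 */
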
#define L2X0_NUM_COUNTERS 2
static struct arm_pmu l2x0_pmu;

static u32 l2x0pmu_max_event_id = 0xf;

static struct perf_event *events[L2X0_NUM_COUNTERS];
static unsigned long used_mask[BITS_TO_LONGS(L2X0_NUM_COUNTERS)];
static struct pmu_hw_events l2x0pmu_hw_events = {
	.events = events,
	.used_mask = used_mask,
	.pmu_lock = __RAW_SPIN_LOCK_UNLOCKED(l2x0pmu_hw_events.pmu_lock),
};

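/*
 * Per-counter register address helpers. In the L2X0 register map the
 * counter 1 config and value registers sit 4 bytes below their counter 0
 * counterparts, so the address for counter idx is derived by subtracting
 * 4 * idx from the counter 0 register offset.
 */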
#define COUNTER_CFG_ADDR(idx) (l2x0_base + L2X0_EVENT_CNT0_CFG - 4 * (idx))

#define COUNTER_CTRL_ADDR (l2x0_base + L2X0_EVENT_CNT_CTRL)

#define COUNTER_ADDR(idx) (l2x0_base + L2X0_EVENT_CNT0_VAL - 4 * (idx))

static u32 l2x0_read_intr_mask(void)
{
	return readl_relaxed(l2x0_base + L2X0_INTR_MASK);
}

static void l2x0_write_intr_mask(u32 val)
{
	writel_relaxed(val, l2x0_base + L2X0_INTR_MASK);
}

static void l2x0_enable_counter_interrupt(void)
{
	u32 intr_mask = l2x0_read_intr_mask();

	intr_mask |= L2X0_INTR_MASK_ECNTR;
	l2x0_write_intr_mask(intr_mask);
}

static void l2x0_disable_counter_interrupt(void)
{
	u32 intr_mask = l2x0_read_intr_mask();

	intr_mask &= ~L2X0_INTR_MASK_ECNTR;
	l2x0_write_intr_mask(intr_mask);
}

static void l2x0_clear_interrupts(u32 flags)
{
	writel_relaxed(flags, l2x0_base + L2X0_INTR_CLEAR);
}

static struct pmu_hw_events *l2x0pmu_get_hw_events(void)
{
	return &l2x0pmu_hw_events;
}

static u32 l2x0pmu_read_ctrl(void)
{
	return readl_relaxed(COUNTER_CTRL_ADDR);
}

static void l2x0pmu_write_ctrl(u32 val)
{
	writel_relaxed(val, COUNTER_CTRL_ADDR);
}

static u32 l2x0pmu_read_cfg(int idx)
{
	return readl_relaxed(COUNTER_CFG_ADDR(idx));
}

static void l2x0pmu_write_cfg(u32 val, int idx)
{
	writel_relaxed(val, COUNTER_CFG_ADDR(idx));
}

static void l2x0pmu_enable_counter(u32 cfg, int idx)
{
	cfg |= L2X0_EVENT_CNT_CFG_INTR_OVERFLOW;
	l2x0pmu_write_cfg(cfg, idx);
}

static u32 l2x0pmu_disable_counter(int idx)
{
	u32 cfg, oldcfg;

	cfg = oldcfg = l2x0pmu_read_cfg(idx);

	cfg &= ~L2X0_EVENT_CNT_CFG_MASK;
	cfg &= ~L2X0_EVENT_CNT_CFG_INTR_MASK;
	l2x0pmu_write_cfg(cfg, idx);

	return oldcfg;
}

static u32 l2x0pmu_read_counter(int idx)
{
	return readl_relaxed(COUNTER_ADDR(idx));
}

static void l2x0pmu_write_counter(int idx, u32 val)
{
	/*
	 * L2X0 counters can only be written to while they are disabled.
	 * The perf core does not disable counters before writing to them,
	 * so we must do so here and restore the old config afterwards.
	 */
	u32 cfg = l2x0pmu_disable_counter(idx);

	writel_relaxed(val, COUNTER_ADDR(idx));
	l2x0pmu_write_cfg(cfg, idx);
}

static int counter_is_saturated(int idx)
{
	return l2x0pmu_read_counter(idx) == 0xFFFFFFFF;
}

static void l2x0pmu_start(void)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&l2x0pmu_hw_events.pmu_lock, flags);

	if (!rev1)
		l2x0_enable_counter_interrupt();

	val = l2x0pmu_read_ctrl();

	val |= L2X0_EVENT_CNT_ENABLE;
	l2x0pmu_write_ctrl(val);

	raw_spin_unlock_irqrestore(&l2x0pmu_hw_events.pmu_lock, flags);
}

static void l2x0pmu_stop(void)
{
	unsigned long flags;
	u32 val;

	raw_spin_lock_irqsave(&l2x0pmu_hw_events.pmu_lock, flags);

	val = l2x0pmu_read_ctrl();
	val &= ~L2X0_EVENT_CNT_ENABLE_MASK;
	l2x0pmu_write_ctrl(val);

	if (!rev1)
		l2x0_disable_counter_interrupt();

	raw_spin_unlock_irqrestore(&l2x0pmu_hw_events.pmu_lock, flags);
}

static void l2x0pmu_enable(struct hw_perf_event *event, int idx, int cpu)
{
	unsigned long flags;
	u32 cfg;

	raw_spin_lock_irqsave(&l2x0pmu_hw_events.pmu_lock, flags);

	cfg = (event->config_base << L2X0_EVENT_CNT_CFG_SHIFT) &
	      L2X0_EVENT_CNT_CFG_MASK;
	l2x0pmu_enable_counter(cfg, idx);

	raw_spin_unlock_irqrestore(&l2x0pmu_hw_events.pmu_lock, flags);
}

static void l2x0pmu_disable(struct hw_perf_event *event, int idx)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&l2x0pmu_hw_events.pmu_lock, flags);
	l2x0pmu_disable_counter(idx);
	raw_spin_unlock_irqrestore(&l2x0pmu_hw_events.pmu_lock, flags);
}

static int l2x0pmu_get_event_idx(struct pmu_hw_events *events,
				 struct hw_perf_event *hwc)
{
	int idx;

	/* Counters are identical. Just grab a free one. */
	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		if (!test_and_set_bit(idx, l2x0pmu_hw_events.used_mask))
			return idx;
	}

	return -EAGAIN;
}

/*
 * As system PMUs are affine to CPU0, the fact that interrupts are disabled
 * during interrupt handling is enough to serialise our actions and make this
 * safe. We do not need to grab our pmu_lock here.
 */
static irqreturn_t l2x0pmu_handle_irq(int irq, void *dev)
{
	irqreturn_t status = IRQ_NONE;
	struct perf_sample_data data;
	struct pt_regs *regs;
	int idx;

	/* The remaining sample fields are initialised in perf_prepare_sample(). */
	perf_sample_data_init(&data, 0);

	regs = get_irq_regs();

	for (idx = 0; idx < L2X0_NUM_COUNTERS; ++idx) {
		struct perf_event *event = l2x0pmu_hw_events.events[idx];
		struct hw_perf_event *hwc;

		if (!counter_is_saturated(idx))
			continue;

		status = IRQ_HANDLED;

		hwc = &event->hw;

		/*
		 * The armpmu_* functions expect counters to overflow, but
		 * L220/PL310 counters saturate instead. Fake the overflow
		 * here so the hardware is in sync with what the framework
		 * expects.
		 */
		l2x0pmu_write_counter(idx, 0);

		armpmu_event_update(event, hwc, idx);
		data.period = hwc->last_period;

		if (!armpmu_event_set_period(event, hwc, idx))
			continue;

		if (perf_event_overflow(event, &data, regs))
			l2x0pmu_disable_counter(idx);
	}

	l2x0_clear_interrupts(L2X0_INTR_MASK_ECNTR);

	irq_work_run();

	return status;
}

static int map_l2x0_raw_event(u64 config)
{
	return (config <= l2x0pmu_max_event_id) ? config : -ENOENT;
}

static int l2x0pmu_map_event(struct perf_event *event)
{
	u64 config = event->attr.config;
	u64 supported_samples = (PERF_SAMPLE_TIME |
				PERF_SAMPLE_ID |
				PERF_SAMPLE_PERIOD |
				PERF_SAMPLE_STREAM_ID |
				PERF_SAMPLE_RAW);

	/* Reject the event unless registration has assigned our dynamic type. */
	if ((pmu_type == 0) || (pmu_type != event->attr.type))
		return -ENOENT;

	if (event->attr.sample_type & ~supported_samples)
		return -ENOENT;

	return map_l2x0_raw_event(config);
}

static int
arm_l2_pmu_generic_request_irq(int irq, irq_handler_t *handle_irq)
{
	return request_irq(irq, *handle_irq,
			IRQF_DISABLED | IRQF_NOBALANCING,
			"arm-l2-armpmu", NULL);
}

static void
arm_l2_pmu_generic_free_irq(int irq)
{
	if (irq >= 0)
		free_irq(irq, NULL);
}

static struct arm_pmu l2x0_pmu = {
	.id = ARM_PERF_PMU_ID_L2X0,
	.type = ARM_PMU_DEVICE_L2CC,
	.name = "msm-l2",
	.start = l2x0pmu_start,
	.stop = l2x0pmu_stop,
	.handle_irq = l2x0pmu_handle_irq,
	.enable = l2x0pmu_enable,
	.disable = l2x0pmu_disable,
	.get_event_idx = l2x0pmu_get_event_idx,
	.read_counter = l2x0pmu_read_counter,
	.write_counter = l2x0pmu_write_counter,
	.map_event = l2x0pmu_map_event,
	.num_events = L2X0_NUM_COUNTERS,
	.max_period = 0xFFFFFFFF,
	.get_hw_events = l2x0pmu_get_hw_events,
	.pmu.attr_groups = arm_l2_pmu_attr_grps,
	.request_pmu_irq = arm_l2_pmu_generic_request_irq,
	.free_pmu_irq = arm_l2_pmu_generic_free_irq,
};

static int __devinit l2x0pmu_device_probe(struct platform_device *pdev)
{
	u32 aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
	u32 debug = readl_relaxed(l2x0_base + L2X0_DEBUG_CTRL);

	l2x0_pmu.plat_device = pdev;

	if (!(aux & (1 << L2X0_AUX_CTRL_EVENT_MONITOR_SHIFT))) {
		pr_err("L2X0 event monitor is off. L2 counters disabled.\n");
		return -EOPNOTSUPP;
	}

	pr_info("L2CC PMU device found. DEBUG_CTRL: %x\n", debug);

	/* Save the dynamically allocated PMU type for l2x0pmu_map_event(). */
	if (!armpmu_register(&l2x0_pmu, "msm-l2", -1)) {
		pmu_type = l2x0_pmu.pmu.type;
	} else {
		pr_err("l2x0_pmu registration failed\n");
		return -EOPNOTSUPP;
	}

	return 0;
}

static struct platform_driver l2x0pmu_driver = {
	.driver = {
		.name = "l2-arm-pmu",
	},
	.probe = l2x0pmu_device_probe,
};

static int __init register_pmu_driver(void)
{
	if (machine_is_msm9625())
		rev1 = 1;

	return platform_driver_register(&l2x0pmu_driver);
}
device_initcall(register_pmu_driver);