/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4
#define MIPS_TCS_PER_COUNTER 2
#define MIPS_CPUID_TO_COUNTER_MASK (MIPS_TCS_PER_COUNTER - 1)
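
/*
 * On cores where two thread contexts share each counter bank (see the
 * BMIPS5000 variant of vpe_id() below), the low bit of the CPU id
 * selects which half of the counters belongs to the calling thread.
 */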

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance
	 * counter. MIPS CPUs vary in their performance counters; they
	 * use this field differently, and some may not use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates that the event may only be counted on
	 * even-numbered counters, and CNTR_ODD on odd-numbered ones.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)

#ifdef CONFIG_CPU_BMIPS5000
#define M_PERFCTL_MT_EN(filter)		0
#else /* !CONFIG_CPU_BMIPS5000 */
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#endif /* CONFIG_CPU_BMIPS5000 */

#define M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)
#define M_PERFCTL_TC			(1 << 30)
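
/*
 * Note that M_PERFCTL_WIDE and M_PERFCTL_TC both name bit 30: WIDE is
 * only read (in init_hw_perf_events() below, to detect 64-bit wide
 * counters), while TC is only written on BMIPS5000 to bind a counter
 * to the calling thread context.
 */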

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					 M_PERFCTL_KERNEL |		\
					 M_PERFCTL_USER |		\
					 M_PERFCTL_SUPERVISOR |		\
					 M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

#if defined(CONFIG_CPU_BMIPS5000)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : (smp_processor_id() & MIPS_CPUID_TO_COUNTER_MASK))
#else
/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			 0 : smp_processor_id())
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

#else /* !CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */
#define vpe_id()	0

#endif /* CONFIG_MIPS_PERF_SHARED_TC_COUNTERS */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);
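
/*
 * When the two VPEs of an MT core share the four physical counters,
 * VPE 1's logical counter indexes 0 and 1 are swizzled onto physical
 * counters 2 and 3 (and 2 and 3 onto 0 and 1), so each VPE sees a
 * private pair.
 */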

static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned; we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind takes a counter
		 * that an event of the latter kind needs, the "counter
		 * allocation" for the latter event will fail. If the two
		 * could be dynamically swapped, both would succeed, but
		 * we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	if (IS_ENABLED(CONFIG_CPU_BMIPS5000))
		/* enable the counter for the calling thread */
		cpuc->saved_ctrl[idx] |=
			(1 << (12 + vpe_id())) | M_PERFCTL_TC;

	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}
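
/*
 * Arm the counter so that it overflows after 'left' more increments:
 * with 32-bit counters, mipspmu.overflow is 1ULL << 31, so writing
 * 0x80000000 - left makes the counter reach the overflow bit (bit 31)
 * after exactly 'left' events.
 */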

static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}
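
/*
 * Fold the hardware count back into the generic perf bookkeeping. The
 * cmpxchg loop retries if prev_count was updated concurrently (e.g. by
 * the counter overflow interrupt) between the read and the update.
 */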

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers
 * cannot be directly accessed across CPUs. Hence if we want to do
 * global control, we need cross CPU calls. on_each_cpu() can help us,
 * but we cannot make sure this function is called with interrupts
 * enabled. So here we pause local counters and then grab a rwlock and
 * leave the counters on other CPUs alone. If any counter interrupt is
 * raised while we own the write lock, simply pause local counters on
 * that CPU and spin in the handler. Also we know we won't be switched
 * to another CPU after pausing local counters and before grabbing the
 * lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
				  IRQF_PERCPU | IRQF_NOBALANCING |
				  IRQF_NO_THREAD | IRQF_NO_SUSPEND |
				  IRQF_SHARED,
				  "mips_perf_pmu", &mipspmu);
		if (err) {
			pr_warn("Unable to request IRQ%d for MIPS performance counters!\n",
				mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warn("The platform hasn't properly defined its interrupt controller\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, &mipspmu);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters; they
 * have specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	/* does not support taken branch sampling */
	if (has_branch_stack(event))
		return -EOPNOTSUPP;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}
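
/*
 * The callbacks that hook this driver into the core perf subsystem;
 * registered under the name "cpu" via perf_pmu_register() in
 * init_hw_perf_events() below.
 */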

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};
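
/*
 * Example of the packing below: a mips_perf_event with event_id 0x02,
 * cntr_mask CNTR_EVEN and range T encodes to
 * (0 << 24) | (0x55555555 & 0xffff00) | 0x02 = 0x555502, and
 * mipsxx_pmu_alloc_counter() later recovers the even-counter mask
 * 0x5555 from bits 8-23.
 */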

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	if ((*mipspmu.general_event_map)[idx].cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);
	return &(*mipspmu.general_event_map)[idx];
}
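
/*
 * The config value decoded below follows the generic perf ABI layout:
 * cache type in bits 0-7, operation in bits 8-15, result in bits
 * 16-23. For example, an L1D read miss is
 * C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) = 0x10000.
 */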

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
					[cache_type]
					[cache_op]
					[cache_result]);

	if (pev->cntr_mask == 0)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -EINVAL;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -EINVAL;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -EINVAL;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}
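
/*
 * Probe how many counters are implemented: Config1.PC says whether
 * there are any at all, and bit 31 (M_PERFCTL_MORE) of each control
 * register says whether another counter follows it.
 */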

static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
	case CPU_R16000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}
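
/*
 * Clear all implemented counters. Each case below intentionally falls
 * through to the next so that every counter from 'counters - 1' down
 * to 0 gets its control and count registers zeroed.
 */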

static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;
	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

/* 74K/proAptiv core has different branch event code. */
static const struct mips_perf_event mipsxxcore_event_map2
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

static const struct mips_perf_event bmips5000_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
};

static const struct mips_perf_event xlp_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x18, CNTR_ALL }, /* PAPI_TOT_INS */
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x1b, CNTR_ALL }, /* PAPI_BR_CN */
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x1c, CNTR_ALL }, /* PAPI_BR_MSP */
};

/* 24K/34K/1004K/interAptiv/loongson1 cores share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

/* 74K/proAptiv core has completely different cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map2
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN, P },
	},
},
/*
 * 74K core does not have specific DTLB events. proAptiv core has
 * "speculative" DTLB events which are numbered 0x63 (even/odd) and
 * not included here. One can use raw events if really needed.
 */
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
},
};

/* BMIPS5000 */
static const struct mips_perf_event bmips5000_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 12, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 12, CNTR_ODD, T },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 10, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 10, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 23, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 28, CNTR_EVEN, P },
		[C(RESULT_MISS)]	= { 28, CNTR_ODD, P },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
},
};

static const struct mips_perf_event xlp_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x31, CNTR_ALL }, /* PAPI_L1_DCR */
		[C(RESULT_MISS)]	= { 0x30, CNTR_ALL }, /* PAPI_L1_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x2f, CNTR_ALL }, /* PAPI_L1_DCW */
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL }, /* PAPI_L1_STM */
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_ALL }, /* PAPI_L1_ICA */
		[C(RESULT_MISS)]	= { 0x07, CNTR_ALL }, /* PAPI_L1_ICM */
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x35, CNTR_ALL }, /* PAPI_L2_DCR */
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL }, /* PAPI_L2_LDM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_ALL }, /* PAPI_L2_DCA */
		[C(RESULT_MISS)]	= { 0x36, CNTR_ALL }, /* PAPI_L2_DCM */
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; use the same event for
	 * read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x2d, CNTR_ALL }, /* PAPI_TLB_DM */
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
	[C(OP_WRITE)] = {
		[C(RESULT_MISS)]	= { 0x08, CNTR_ALL }, /* PAPI_TLB_IM */
	},
},
[C(BPU)] = {
	[C(OP_READ)] = {
		[C(RESULT_MISS)]	= { 0x25, CNTR_ALL },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Returning MIPS event descriptor for generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by the single CPU operates (the mode exclusion and the
	 * range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate range bits and validate it. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period	= mipspmu.max_period;
		hwc->last_period	= hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}
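
/*
 * Stop every counter on the calling CPU, stashing the live control
 * register values in saved_ctrl so that resume_local_counters() can
 * put them back unchanged.
 */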

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_perf_cntr_intr_bit && !(read_c0_cause() & CAUSEF_PCI))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * perf_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipsxx_pmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not an NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* proAptiv */
#define IS_BOTH_COUNTERS_PROAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1)
/* P5600 */
#define IS_BOTH_COUNTERS_P5600_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/* interAptiv */
#define IS_BOTH_COUNTERS_INTERAPTIV_EVENT(b)				\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
/* The P/V/T info is not provided for "(b) == 38" in SUM, assume P. */
#define IS_RANGE_P_INTERAPTIV_EVENT(r, b)				\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 38 || (b) == 39 ||		\
	 (r) == 44 || (r) == 174 || (r) == 176 || ((b) >= 50 &&		\
	 (b) <= 59) || (r) == 188 || (b) == 61 || (b) == 62 ||		\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_INTERAPTIV_EVENT(r)	((r) == 47 || (r) == 175)
#endif

/* BMIPS5000 */
#define IS_BOTH_COUNTERS_BMIPS5000_EVENT(b)				\
	((b) == 0 || (b) == 1)


/*
 * For most cores the user can use 0-255 raw events, where 0-127 are for
 * the events of even counters, and 128-255 are for odd counters. Note
 * that bit 7 is used to indicate the even/odd bank selector. So, for
 * example, when a user wants event number 15 for odd counters (by
 * referring to the user manual), 128 needs to be added to 15, i.e., the
 * event config to use is 143 (0x8f).
 *
 * Some newer cores have even more events, in which case the user can
 * use raw events 0-511, where 0-255 are for the events of even
 * counters, and 256-511 are for odd counters, so bit 8 is used to
 * indicate the even/odd bank selector.
 */
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	/* currently most cores have 7-bit event numbers */
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
	case CPU_1074K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_PROAPTIV:
		if (IS_BOTH_COUNTERS_PROAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_P5600:
		/* 8-bit event numbers */
		raw_id = config & 0x1ff;
		base_id = raw_id & 0xff;
		if (IS_BOTH_COUNTERS_P5600_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 255 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_INTERAPTIV:
		if (IS_BOTH_COUNTERS_INTERAPTIV_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_INTERAPTIV_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_INTERAPTIV_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_BMIPS5000:
		if (IS_BOTH_COUNTERS_BMIPS5000_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
	}

	raw_event.event_id = base_id;

	return &raw_event;
}
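
/*
 * Octeon counters can count any event (CNTR_ALL); the ids rejected
 * below are apparently holes in the event numbering with no event
 * assigned.
 */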

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *xlp_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;

	/* Only 1-63 are defined */
	if ((raw_id < 0x01) || (raw_id > 0x3f))
		return ERR_PTR(-EOPNOTSUPP);

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = raw_id;

	return &raw_event;
}
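
/*
 * Probe the PMU at early boot: count the hardware counters, pick the
 * counter IRQ, bind the per-core event tables, and size the period
 * bookkeeping to the counter width advertised by M_PERFCTL_WIDE
 * (64-bit counters get a 63-bit max period, 32-bit ones a 31-bit one).
 */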

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_PERF_SHARED_TC_COUNTERS
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

	if (get_c0_perfcount_int)
		irq = get_c0_perfcount_int();
	else if (cp0_perfcount_irq >= 0)
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_PROAPTIV:
		mipspmu.name = "mips/proAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_P5600:
		mipspmu.name = "mips/P5600";
		mipspmu.general_event_map = &mipsxxcore_event_map2;
		mipspmu.cache_event_map = &mipsxxcore_cache_map2;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_1074K:
		mipspmu.name = "mips/1074K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_INTERAPTIV:
		mipspmu.name = "mips/interAptiv";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_LOONGSON1:
		mipspmu.name = "mips/loongson1";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	case CPU_BMIPS5000:
		mipspmu.name = "BMIPS5000";
		mipspmu.general_event_map = &bmips5000_event_map;
		mipspmu.cache_event_map = &bmips5000_cache_map;
		break;
	case CPU_XLP:
		mipspmu.name = "xlp";
		mipspmu.general_event_map = &xlp_event_map;
		mipspmu.cache_event_map = &xlp_cache_map;
		mipspmu.map_raw_event = xlp_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);