/*
 * Linux performance counter support for MIPS.
 *
 * Copyright (C) 2010 MIPS Technologies, Inc.
 * Copyright (C) 2011 Cavium Networks, Inc.
 * Author: Deng-Cheng Zhu
 *
 * This code is based on the implementation for ARM, which is in turn
 * based on the sparc64 perf event code and the x86 code. Performance
 * counter access is based on the MIPS Oprofile code. And the callchain
 * support references the code of MIPS stacktrace.c.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/cpumask.h>
#include <linux/interrupt.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/perf_event.h>
#include <linux/uaccess.h>

#include <asm/irq.h>
#include <asm/irq_regs.h>
#include <asm/stacktrace.h>
#include <asm/time.h> /* For perf_irq */

#define MIPS_MAX_HWEVENTS 4

struct cpu_hw_events {
	/* Array of events on this cpu. */
	struct perf_event	*events[MIPS_MAX_HWEVENTS];

	/*
	 * Set the bit (indexed by the counter number) when the counter
	 * is used for an event.
	 */
	unsigned long		used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];

	/*
	 * Software copy of the control register for each performance counter.
	 * MIPS CPUs vary in their performance counters; different cores use
	 * this field differently, and some may not use it at all.
	 */
	unsigned int		saved_ctrl[MIPS_MAX_HWEVENTS];
};
DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.saved_ctrl = {0},
};

/* The description of MIPS performance events. */
struct mips_perf_event {
	unsigned int event_id;
	/*
	 * MIPS performance counters are indexed starting from 0.
	 * CNTR_EVEN indicates the indexes of the counters to be used are
	 * even numbers.
	 */
	unsigned int cntr_mask;
	#define CNTR_EVEN	0x55555555
	#define CNTR_ODD	0xaaaaaaaa
	#define CNTR_ALL	0xffffffff
#ifdef CONFIG_MIPS_MT_SMP
	enum {
		T  = 0,
		V  = 1,
		P  = 2,
	} range;
#else
	#define T
	#define V
	#define P
#endif
};

static struct mips_perf_event raw_event;
static DEFINE_MUTEX(raw_event_mutex);

#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
#define C(x) PERF_COUNT_HW_CACHE_##x

struct mips_pmu {
	u64		max_period;
	u64		valid_count;
	u64		overflow;
	const char	*name;
	int		irq;
	u64		(*read_counter)(unsigned int idx);
	void		(*write_counter)(unsigned int idx, u64 val);
	const struct mips_perf_event *(*map_raw_event)(u64 config);
	const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
	const struct mips_perf_event (*cache_event_map)
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
	unsigned int	num_counters;
};

static struct mips_pmu mipspmu;

#define M_CONFIG1_PC	(1 << 4)

#define M_PERFCTL_EXL			(1 << 0)
#define M_PERFCTL_KERNEL		(1 << 1)
#define M_PERFCTL_SUPERVISOR		(1 << 2)
#define M_PERFCTL_USER			(1 << 3)
#define M_PERFCTL_INTERRUPT_ENABLE	(1 << 4)
#define M_PERFCTL_EVENT(event)		(((event) & 0x3ff) << 5)
#define M_PERFCTL_VPEID(vpe)		((vpe) << 16)
#define M_PERFCTL_MT_EN(filter)		((filter) << 20)
#define    M_TC_EN_ALL			M_PERFCTL_MT_EN(0)
#define    M_TC_EN_VPE			M_PERFCTL_MT_EN(1)
#define    M_TC_EN_TC			M_PERFCTL_MT_EN(2)
#define M_PERFCTL_TCID(tcid)		((tcid) << 22)
#define M_PERFCTL_WIDE			(1 << 30)
#define M_PERFCTL_MORE			(1 << 31)

#define M_PERFCTL_COUNT_EVENT_WHENEVER	(M_PERFCTL_EXL |		\
					M_PERFCTL_KERNEL |		\
					M_PERFCTL_USER |		\
					M_PERFCTL_SUPERVISOR |		\
					M_PERFCTL_INTERRUPT_ENABLE)

#ifdef CONFIG_MIPS_MT_SMP
#define M_PERFCTL_CONFIG_MASK		0x3fff801f
#else
#define M_PERFCTL_CONFIG_MASK		0x1f
#endif
#define M_PERFCTL_EVENT_MASK		0xfe0

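/*
 * Illustrative example (not part of the code paths below): to count
 * event 0x0a in both user and kernel mode with the overflow interrupt
 * enabled, the control word would be
 *
 *	M_PERFCTL_EVENT(0x0a) | M_PERFCTL_USER | M_PERFCTL_KERNEL |
 *		M_PERFCTL_INTERRUPT_ENABLE
 *
 * which is the kind of value mipsxx_pmu_enable_event() builds from
 * hwc->event_base and hwc->config_base.
 */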

#ifdef CONFIG_MIPS_MT_SMP
static int cpu_has_mipsmt_pertccounters;

static DEFINE_RWLOCK(pmuint_rwlock);

/*
 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
 */
#if defined(CONFIG_HW_PERF_EVENTS)
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : smp_processor_id())
#else
#define vpe_id()	(cpu_has_mipsmt_pertccounters ? \
			0 : cpu_data[smp_processor_id()].vpe_id)
#endif

/* Copied from op_model_mipsxx.c */
static unsigned int vpe_shift(void)
{
	if (num_possible_cpus() > 1)
		return 1;

	return 0;
}

static unsigned int counters_total_to_per_cpu(unsigned int counters)
{
	return counters >> vpe_shift();
}

static unsigned int counters_per_cpu_to_total(unsigned int counters)
{
	return counters << vpe_shift();
}

#else /* !CONFIG_MIPS_MT_SMP */
#define vpe_id()	0

#endif /* CONFIG_MIPS_MT_SMP */

static void resume_local_counters(void);
static void pause_local_counters(void);
static irqreturn_t mipsxx_pmu_handle_irq(int, void *);
static int mipsxx_pmu_handle_shared_irq(void);

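/*
 * On a two-VPE core the swizzle below maps each VPE's logical counter
 * indexes onto its own physical pair: on VPE 1 the logical indexes
 * 0, 1, 2, 3 refer to the physical counters 2, 3, 0, 1.
 */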
static unsigned int mipsxx_pmu_swizzle_perf_idx(unsigned int idx)
{
	if (vpe_id() == 1)
		idx = (idx + 2) & 3;
	return idx;
}

static u64 mipsxx_pmu_read_counter(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		/*
		 * The counters are unsigned; we must cast to truncate
		 * off the high bits.
		 */
		return (u32)read_c0_perfcntr0();
	case 1:
		return (u32)read_c0_perfcntr1();
	case 2:
		return (u32)read_c0_perfcntr2();
	case 3:
		return (u32)read_c0_perfcntr3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static u64 mipsxx_pmu_read_counter_64(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfcntr0_64();
	case 1:
		return read_c0_perfcntr1_64();
	case 2:
		return read_c0_perfcntr2_64();
	case 3:
		return read_c0_perfcntr3_64();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0(val);
		return;
	case 1:
		write_c0_perfcntr1(val);
		return;
	case 2:
		write_c0_perfcntr2(val);
		return;
	case 3:
		write_c0_perfcntr3(val);
		return;
	}
}

static void mipsxx_pmu_write_counter_64(unsigned int idx, u64 val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfcntr0_64(val);
		return;
	case 1:
		write_c0_perfcntr1_64(val);
		return;
	case 2:
		write_c0_perfcntr2_64(val);
		return;
	case 3:
		write_c0_perfcntr3_64(val);
		return;
	}
}

static unsigned int mipsxx_pmu_read_control(unsigned int idx)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		return read_c0_perfctrl0();
	case 1:
		return read_c0_perfctrl1();
	case 2:
		return read_c0_perfctrl2();
	case 3:
		return read_c0_perfctrl3();
	default:
		WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
		return 0;
	}
}

static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
{
	idx = mipsxx_pmu_swizzle_perf_idx(idx);

	switch (idx) {
	case 0:
		write_c0_perfctrl0(val);
		return;
	case 1:
		write_c0_perfctrl1(val);
		return;
	case 2:
		write_c0_perfctrl2(val);
		return;
	case 3:
		write_c0_perfctrl3(val);
		return;
	}
}

static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
				    struct hw_perf_event *hwc)
{
	int i;

	/*
	 * We only need to care about the counter mask. The range has
	 * already been checked.
	 */
	unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;

	for (i = mipspmu.num_counters - 1; i >= 0; i--) {
		/*
		 * Note that some MIPS perf events can be counted by both
		 * even and odd counters, whereas many others can only be
		 * counted by even _or_ odd counters. This introduces an
		 * issue: when an event of the former kind takes the
		 * counter that an event of the latter kind wants to use,
		 * "counter allocation" for the latter event will fail.
		 * If the two could be dynamically swapped, both would be
		 * satisfied, but we leave this issue alone for now.
		 */
		if (test_bit(i, &cntr_mask) &&
			!test_and_set_bit(i, cpuc->used_mask))
			return i;
	}

	return -EAGAIN;
}

static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
		(evt->config_base & M_PERFCTL_CONFIG_MASK) |
		/* Make sure the interrupt is enabled. */
		M_PERFCTL_INTERRUPT_ENABLE;
	/*
	 * We do not actually let the counter run. Leave it until start().
	 */
}

static void mipsxx_pmu_disable_event(int idx)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long flags;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	local_irq_save(flags);
	cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
		~M_PERFCTL_COUNT_EVENT_WHENEVER;
	mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
	local_irq_restore(flags);
}

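/*
 * Program the counter so that it overflows (i.e. reaches
 * mipspmu.overflow) after "left" more events.  For example, with
 * 32-bit counters and left == 1000, the counter is written with
 * 0x80000000 - 1000; after 1000 increments bit 31 is set and the
 * overflow interrupt fires.
 */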
static int mipspmu_event_set_period(struct perf_event *event,
				    struct hw_perf_event *hwc,
				    int idx)
{
	u64 left = local64_read(&hwc->period_left);
	u64 period = hwc->sample_period;
	int ret = 0;

	if (unlikely((left + period) & (1ULL << 63))) {
		/* left underflowed by more than period. */
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	} else if (unlikely((left + period) <= period)) {
		/* left underflowed by less than period. */
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (left > mipspmu.max_period) {
		left = mipspmu.max_period;
		local64_set(&hwc->period_left, left);
	}

	local64_set(&hwc->prev_count, mipspmu.overflow - left);

	mipspmu.write_counter(idx, mipspmu.overflow - left);

	perf_event_update_userpage(event);

	return ret;
}

static void mipspmu_event_update(struct perf_event *event,
				 struct hw_perf_event *hwc,
				 int idx)
{
	u64 prev_raw_count, new_raw_count;
	u64 delta;

again:
	prev_raw_count = local64_read(&hwc->prev_count);
	new_raw_count = mipspmu.read_counter(idx);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
				new_raw_count) != prev_raw_count)
		goto again;

	delta = new_raw_count - prev_raw_count;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);
}

static void mipspmu_start(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));

	hwc->state = 0;

	/* Set the period for the event. */
	mipspmu_event_set_period(event, hwc, hwc->idx);

	/* Enable the event. */
	mipsxx_pmu_enable_event(hwc, hwc->idx);
}

static void mipspmu_stop(struct perf_event *event, int flags)
{
	struct hw_perf_event *hwc = &event->hw;

	if (!(hwc->state & PERF_HES_STOPPED)) {
		/* We are working on a local event. */
		mipsxx_pmu_disable_event(hwc->idx);
		barrier();
		mipspmu_event_update(event, hwc, hwc->idx);
		hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	}
}

static int mipspmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx;
	int err = 0;

	perf_pmu_disable(event->pmu);

	/* Look for a free counter for this event. */
	idx = mipsxx_pmu_alloc_counter(cpuc, hwc);
	if (idx < 0) {
		err = idx;
		goto out;
	}

	/*
	 * If there is an event in the counter we are going to use then
	 * make sure it is disabled.
	 */
	event->hw.idx = idx;
	mipsxx_pmu_disable_event(idx);
	cpuc->events[idx] = event;

	hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	if (flags & PERF_EF_START)
		mipspmu_start(event, PERF_EF_RELOAD);

	/* Propagate our changes to the userspace mapping. */
	perf_event_update_userpage(event);

out:
	perf_pmu_enable(event->pmu);
	return err;
}

static void mipspmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	WARN_ON(idx < 0 || idx >= mipspmu.num_counters);

	mipspmu_stop(event, PERF_EF_UPDATE);
	cpuc->events[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_event_update_userpage(event);
}

static void mipspmu_read(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	/* Don't read disabled counters! */
	if (hwc->idx < 0)
		return;

	mipspmu_event_update(event, hwc, hwc->idx);
}

static void mipspmu_enable(struct pmu *pmu)
{
#ifdef CONFIG_MIPS_MT_SMP
	write_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
}

/*
 * MIPS performance counters can be per-TC. The control registers
 * cannot be directly accessed across CPUs, so global control would
 * need cross-CPU calls. on_each_cpu() could help, but we cannot
 * guarantee that this function is called with interrupts enabled. So
 * here we pause the local counters, then grab a rwlock and leave the
 * counters on other CPUs alone. If a counter interrupt is raised while
 * we own the write lock, the handler on that CPU simply pauses its
 * local counters and spins. Also, we know we won't be switched to
 * another CPU after pausing the local counters and before grabbing
 * the lock.
 */
static void mipspmu_disable(struct pmu *pmu)
{
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	write_lock(&pmuint_rwlock);
#endif
}

static atomic_t active_events = ATOMIC_INIT(0);
static DEFINE_MUTEX(pmu_reserve_mutex);
static int (*save_perf_irq)(void);

static int mipspmu_get_irq(void)
{
	int err;

	if (mipspmu.irq >= 0) {
		/* Request my own irq handler. */
		err = request_irq(mipspmu.irq, mipsxx_pmu_handle_irq,
			IRQF_PERCPU | IRQF_NOBALANCING,
			"mips_perf_pmu", NULL);
		if (err) {
			pr_warning("Unable to request IRQ%d for MIPS "
				"performance counters!\n", mipspmu.irq);
		}
	} else if (cp0_perfcount_irq < 0) {
		/*
		 * We are sharing the irq number with the timer interrupt.
		 */
		save_perf_irq = perf_irq;
		perf_irq = mipsxx_pmu_handle_shared_irq;
		err = 0;
	} else {
		pr_warning("The platform hasn't properly defined its "
			"interrupt controller.\n");
		err = -ENOENT;
	}

	return err;
}

static void mipspmu_free_irq(void)
{
	if (mipspmu.irq >= 0)
		free_irq(mipspmu.irq, NULL);
	else if (cp0_perfcount_irq < 0)
		perf_irq = save_perf_irq;
}

/*
 * mipsxx/rm9000/loongson2 have different performance counters, so each
 * has its own specific low-level init routines.
 */
static void reset_counters(void *arg);
static int __hw_perf_event_init(struct perf_event *event);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events,
				&pmu_reserve_mutex)) {
		/*
		 * We must not call the destroy function with interrupts
		 * disabled.
		 */
		on_each_cpu(reset_counters,
			(void *)(long)mipspmu.num_counters, 1);
		mipspmu_free_irq();
		mutex_unlock(&pmu_reserve_mutex);
	}
}

static int mipspmu_event_init(struct perf_event *event)
{
	int err = 0;

	switch (event->attr.type) {
	case PERF_TYPE_RAW:
	case PERF_TYPE_HARDWARE:
	case PERF_TYPE_HW_CACHE:
		break;

	default:
		return -ENOENT;
	}

	if (event->cpu >= nr_cpumask_bits ||
	    (event->cpu >= 0 && !cpu_online(event->cpu)))
		return -ENODEV;

	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmu_reserve_mutex);
		if (atomic_read(&active_events) == 0)
			err = mipspmu_get_irq();

		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmu_reserve_mutex);
	}

	if (err)
		return err;

	return __hw_perf_event_init(event);
}

static struct pmu pmu = {
	.pmu_enable	= mipspmu_enable,
	.pmu_disable	= mipspmu_disable,
	.event_init	= mipspmu_event_init,
	.add		= mipspmu_add,
	.del		= mipspmu_del,
	.start		= mipspmu_start,
	.stop		= mipspmu_stop,
	.read		= mipspmu_read,
};

static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
{
/*
 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
 * event_id.
 */
#ifdef CONFIG_MIPS_MT_SMP
	return ((unsigned int)pev->range << 24) |
		(pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#else
	return (pev->cntr_mask & 0xffff00) |
		(pev->event_id & 0xff);
#endif
}
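
/*
 * Encoding example: an event with cntr_mask == CNTR_EVEN | CNTR_ODD
 * and event_id == 0x01 encodes to 0xffff01 (plus the range byte on MT
 * cores).  mipsxx_pmu_alloc_counter() recovers the counter mask with
 * (hwc->event_base >> 8) & 0xffff, and mipsxx_pmu_enable_event()
 * recovers the event number with evt->event_base & 0xff.
 */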

static const struct mips_perf_event *mipspmu_map_general_event(int idx)
{
	const struct mips_perf_event *pev;

	pev = ((*mipspmu.general_event_map)[idx].event_id ==
		UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
		&(*mipspmu.general_event_map)[idx]);

	return pev;
}

static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
{
	unsigned int cache_type, cache_op, cache_result;
	const struct mips_perf_event *pev;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return ERR_PTR(-EINVAL);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return ERR_PTR(-EINVAL);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return ERR_PTR(-EINVAL);

	pev = &((*mipspmu.cache_event_map)
		[cache_type]
		[cache_op]
		[cache_result]);

	if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
		return ERR_PTR(-EOPNOTSUPP);

	return pev;
}

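/*
 * Dry-run counter allocation for the whole group on a scratch
 * cpu_hw_events: if the leader, its siblings and the new event cannot
 * all get counters at the same time, the group can never be scheduled
 * and is rejected here.
 */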
static int validate_group(struct perf_event *event)
{
	struct perf_event *sibling, *leader = event->group_leader;
	struct cpu_hw_events fake_cpuc;

	memset(&fake_cpuc, 0, sizeof(fake_cpuc));

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &leader->hw) < 0)
		return -ENOSPC;

	list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
		if (mipsxx_pmu_alloc_counter(&fake_cpuc, &sibling->hw) < 0)
			return -ENOSPC;
	}

	if (mipsxx_pmu_alloc_counter(&fake_cpuc, &event->hw) < 0)
		return -ENOSPC;

	return 0;
}

/* This is needed by specific irq handlers in perf_event_*.c */
static void handle_associated_event(struct cpu_hw_events *cpuc,
				    int idx, struct perf_sample_data *data,
				    struct pt_regs *regs)
{
	struct perf_event *event = cpuc->events[idx];
	struct hw_perf_event *hwc = &event->hw;

	mipspmu_event_update(event, hwc, idx);
	data->period = event->hw.last_period;
	if (!mipspmu_event_set_period(event, hwc, idx))
		return;

	if (perf_event_overflow(event, data, regs))
		mipsxx_pmu_disable_event(idx);
}

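/*
 * Probe the number of counters: Config1.PC says whether any counters
 * are implemented at all, and the MORE bit in each control register
 * says whether another counter follows it.
 */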
static int __n_counters(void)
{
	if (!(read_c0_config1() & M_CONFIG1_PC))
		return 0;
	if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
		return 1;
	if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
		return 2;
	if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
		return 3;

	return 4;
}

static int n_counters(void)
{
	int counters;

	switch (current_cpu_type()) {
	case CPU_R10000:
		counters = 2;
		break;

	case CPU_R12000:
	case CPU_R14000:
		counters = 4;
		break;

	default:
		counters = __n_counters();
	}

	return counters;
}

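/*
 * Note that the switch cases below intentionally fall through:
 * resetting counter N also resets every lower-numbered counter.
 */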
static void reset_counters(void *arg)
{
	int counters = (int)(long)arg;

	switch (counters) {
	case 4:
		mipsxx_pmu_write_control(3, 0);
		mipspmu.write_counter(3, 0);
	case 3:
		mipsxx_pmu_write_control(2, 0);
		mipspmu.write_counter(2, 0);
	case 2:
		mipsxx_pmu_write_control(1, 0);
		mipspmu.write_counter(1, 0);
	case 1:
		mipsxx_pmu_write_control(0, 0);
		mipspmu.write_counter(0, 0);
	}
}

/* 24K/34K/1004K cores can share the same event map. */
static const struct mips_perf_event mipsxxcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

/* 74K core has different branch event code. */
static const struct mips_perf_event mipsxx74Kcore_event_map
				[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
	[PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
};

static const struct mips_perf_event octeon_event_map[PERF_COUNT_HW_MAX] = {
	[PERF_COUNT_HW_CPU_CYCLES] = { 0x01, CNTR_ALL },
	[PERF_COUNT_HW_INSTRUCTIONS] = { 0x03, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_REFERENCES] = { 0x2b, CNTR_ALL },
	[PERF_COUNT_HW_CACHE_MISSES] = { 0x2e, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x08, CNTR_ALL },
	[PERF_COUNT_HW_BRANCH_MISSES] = { 0x09, CNTR_ALL },
	[PERF_COUNT_HW_BUS_CYCLES] = { 0x25, CNTR_ALL },
};

/* 24K/34K/1004K cores can share the same cache event map. */
static const struct mips_perf_event mipsxxcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x0a, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x0b, CNTR_EVEN | CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x09, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x09, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x14, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x15, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x16, CNTR_EVEN, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x05, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x05, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x02, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x02, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

/* 74K core has a completely different cache event map. */
static const struct mips_perf_event mipsxx74Kcore_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	/*
	 * Like some other architectures (e.g. ARM), the performance
	 * counters don't differentiate between read and write
	 * accesses/misses, so this isn't strictly correct, but it's the
	 * best we can do. Writes and reads get combined.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x17, CNTR_ODD, T },
		[C(RESULT_MISS)]	= { 0x18, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x06, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x06, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x34, CNTR_EVEN, T },
		/*
		 * Note that MIPS has only "hit" events countable for
		 * the prefetch operation.
		 */
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x1c, CNTR_ODD, P },
		[C(RESULT_MISS)]	= { 0x1d, CNTR_EVEN | CNTR_ODD, P },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	/* 74K core does not have specific DTLB events. */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x04, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x04, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x27, CNTR_EVEN, T },
		[C(RESULT_MISS)]	= { 0x27, CNTR_ODD, T },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(NODE)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

static const struct mips_perf_event octeon_cache_map
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] = {
[C(L1D)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x2b, CNTR_ALL },
		[C(RESULT_MISS)]	= { 0x2e, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { 0x30, CNTR_ALL },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(L1I)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { 0x18, CNTR_ALL },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { 0x19, CNTR_ALL },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(LL)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(DTLB)] = {
	/*
	 * Only general DTLB misses are counted; the same event is used
	 * for read and write.
	 */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { 0x35, CNTR_ALL },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(ITLB)] = {
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { 0x37, CNTR_ALL },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
[C(BPU)] = {
	/* Using the same code for *HW_BRANCH* */
	[C(OP_READ)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_WRITE)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
	[C(OP_PREFETCH)] = {
		[C(RESULT_ACCESS)]	= { UNSUPPORTED_PERF_EVENT_ID },
		[C(RESULT_MISS)]	= { UNSUPPORTED_PERF_EVENT_ID },
	},
},
};

#ifdef CONFIG_MIPS_MT_SMP
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
	struct hw_perf_event *hwc = &event->hw;

	if (event->cpu >= 0) {
		if (pev->range > V) {
			/*
			 * The user selected an event that is processor
			 * wide, while expecting it to be VPE wide.
			 */
			hwc->config_base |= M_TC_EN_ALL;
		} else {
			/*
			 * FIXME: cpu_data[event->cpu].vpe_id reports 0
			 * for both CPUs.
			 */
			hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
			hwc->config_base |= M_TC_EN_VPE;
		}
	} else
		hwc->config_base |= M_TC_EN_ALL;
}
#else
static void check_and_calc_range(struct perf_event *event,
				 const struct mips_perf_event *pev)
{
}
#endif

static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	const struct mips_perf_event *pev;
	int err;

	/* Return the MIPS event descriptor for this generic perf event. */
	if (PERF_TYPE_HARDWARE == event->attr.type) {
		if (event->attr.config >= PERF_COUNT_HW_MAX)
			return -EINVAL;
		pev = mipspmu_map_general_event(event->attr.config);
	} else if (PERF_TYPE_HW_CACHE == event->attr.type) {
		pev = mipspmu_map_cache_event(event->attr.config);
	} else if (PERF_TYPE_RAW == event->attr.type) {
		/* We are working on the global raw event. */
		mutex_lock(&raw_event_mutex);
		pev = mipspmu.map_raw_event(event->attr.config);
	} else {
		/* The event type is not (yet) supported. */
		return -EOPNOTSUPP;
	}

	if (IS_ERR(pev)) {
		if (PERF_TYPE_RAW == event->attr.type)
			mutex_unlock(&raw_event_mutex);
		return PTR_ERR(pev);
	}

	/*
	 * We allow maximum flexibility in how each individual counter
	 * shared by the single CPU operates (the mode exclusion and the
	 * range).
	 */
	hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;

	/* Calculate the range bits and validate them. */
	if (num_possible_cpus() > 1)
		check_and_calc_range(event, pev);

	hwc->event_base = mipspmu_perf_event_encode(pev);
	if (PERF_TYPE_RAW == event->attr.type)
		mutex_unlock(&raw_event_mutex);

	if (!attr->exclude_user)
		hwc->config_base |= M_PERFCTL_USER;
	if (!attr->exclude_kernel) {
		hwc->config_base |= M_PERFCTL_KERNEL;
		/* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
		hwc->config_base |= M_PERFCTL_EXL;
	}
	if (!attr->exclude_hv)
		hwc->config_base |= M_PERFCTL_SUPERVISOR;

	hwc->config_base &= M_PERFCTL_CONFIG_MASK;
	/*
	 * The event can belong to another cpu. We do not assign a local
	 * counter for it for now.
	 */
	hwc->idx = -1;
	hwc->config = 0;

	if (!hwc->sample_period) {
		hwc->sample_period  = mipspmu.max_period;
		hwc->last_period    = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	err = 0;
	if (event->group_leader != event)
		err = validate_group(event);

	event->destroy = hw_perf_event_destroy;

	if (err)
		event->destroy(event);

	return err;
}

static void pause_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;
	unsigned long flags;

	local_irq_save(flags);
	do {
		ctr--;
		cpuc->saved_ctrl[ctr] = mipsxx_pmu_read_control(ctr);
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr] &
					 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
	} while (ctr > 0);
	local_irq_restore(flags);
}

static void resume_local_counters(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int ctr = mipspmu.num_counters;

	do {
		ctr--;
		mipsxx_pmu_write_control(ctr, cpuc->saved_ctrl[ctr]);
	} while (ctr > 0);
}

static int mipsxx_pmu_handle_shared_irq(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_sample_data data;
	unsigned int counters = mipspmu.num_counters;
	u64 counter;
	int handled = IRQ_NONE;
	struct pt_regs *regs;

	if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
		return handled;
	/*
	 * First we pause the local counters, so that when we are locked
	 * here, the counters are all paused. When it gets locked due to
	 * mipspmu_disable(), the timer interrupt handler will be delayed.
	 *
	 * See also mipspmu_start().
	 */
	pause_local_counters();
#ifdef CONFIG_MIPS_MT_SMP
	read_lock(&pmuint_rwlock);
#endif

	regs = get_irq_regs();

	perf_sample_data_init(&data, 0);

	switch (counters) {
#define HANDLE_COUNTER(n)						\
	case n + 1:							\
		if (test_bit(n, cpuc->used_mask)) {			\
			counter = mipspmu.read_counter(n);		\
			if (counter & mipspmu.overflow) {		\
				handle_associated_event(cpuc, n, &data, regs); \
				handled = IRQ_HANDLED;			\
			}						\
		}
	HANDLE_COUNTER(3)
	HANDLE_COUNTER(2)
	HANDLE_COUNTER(1)
	HANDLE_COUNTER(0)
	}

	/*
	 * Do all the work for the pending perf events. We can do this
	 * in here because the performance counter interrupt is a regular
	 * interrupt, not NMI.
	 */
	if (handled == IRQ_HANDLED)
		irq_work_run();

#ifdef CONFIG_MIPS_MT_SMP
	read_unlock(&pmuint_rwlock);
#endif
	resume_local_counters();
	return handled;
}

static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
{
	return mipsxx_pmu_handle_shared_irq();
}

/* 24K */
#define IS_BOTH_COUNTERS_24K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)

/* 34K */
#define IS_BOTH_COUNTERS_34K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_34K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 ||		\
	 (r) == 176 || ((b) >= 50 && (b) <= 55) ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_34K_EVENT(r)	((r) == 47)
#endif

/* 74K */
#define IS_BOTH_COUNTERS_74K_EVENT(b)					\
	((b) == 0 || (b) == 1)

/* 1004K */
#define IS_BOTH_COUNTERS_1004K_EVENT(b)					\
	((b) == 0 || (b) == 1 || (b) == 11)
#ifdef CONFIG_MIPS_MT_SMP
#define IS_RANGE_P_1004K_EVENT(r, b)					\
	((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 ||		\
	 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 ||		\
	 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) ||	\
	 (r) == 188 || (b) == 61 || (b) == 62 ||			\
	 ((b) >= 64 && (b) <= 67))
#define IS_RANGE_V_1004K_EVENT(r)	((r) == 47)
#endif

/*
 * Users can use raw events 0-255, where 0-127 are for the events of
 * even counters and 128-255 for odd counters. Note that bit 7 is used
 * to indicate the parity. So, for example, when a user wants Event Num
 * 15 on the odd counters (by referring to the user manual), 128 needs
 * to be added to 15 as the input for the event config, i.e. 143 (0x8f)
 * is to be used.
 */
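/*
 * With the userspace perf tool, such an event would typically be
 * requested as a raw event, e.g. "perf stat -e r8f" (the exact
 * command-line syntax is a userspace convention, not something this
 * driver defines; the kernel only receives the resulting config
 * value).
 */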
static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.event_id = base_id;

	switch (current_cpu_type()) {
	case CPU_24K:
		if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		/*
		 * This is actually doing nothing. Non-multithreading
		 * CPUs will not check and calculate the range.
		 */
		raw_event.range = P;
#endif
		break;
	case CPU_34K:
		if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	case CPU_74K:
		if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		raw_event.range = P;
#endif
		break;
	case CPU_1004K:
		if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
			raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
		else
			raw_event.cntr_mask =
				raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
#ifdef CONFIG_MIPS_MT_SMP
		if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
			raw_event.range = P;
		else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
			raw_event.range = V;
		else
			raw_event.range = T;
#endif
		break;
	}

	return &raw_event;
}

static const struct mips_perf_event *octeon_pmu_map_raw_event(u64 config)
{
	unsigned int raw_id = config & 0xff;
	unsigned int base_id = raw_id & 0x7f;

	raw_event.cntr_mask = CNTR_ALL;
	raw_event.event_id = base_id;

	if (current_cpu_type() == CPU_CAVIUM_OCTEON2) {
		if (base_id > 0x42)
			return ERR_PTR(-EOPNOTSUPP);
	} else {
		if (base_id > 0x3a)
			return ERR_PTR(-EOPNOTSUPP);
	}

	switch (base_id) {
	case 0x00:
	case 0x0f:
	case 0x1e:
	case 0x1f:
	case 0x2f:
	case 0x34:
	case 0x3b ... 0x3f:
		return ERR_PTR(-EOPNOTSUPP);
	default:
		break;
	}

	return &raw_event;
}

static int __init
init_hw_perf_events(void)
{
	int counters, irq;
	int counter_bits;

	pr_info("Performance counters: ");

	counters = n_counters();
	if (counters == 0) {
		pr_cont("No available PMU.\n");
		return -ENODEV;
	}

#ifdef CONFIG_MIPS_MT_SMP
	cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
	if (!cpu_has_mipsmt_pertccounters)
		counters = counters_total_to_per_cpu(counters);
#endif

#ifdef MSC01E_INT_BASE
	if (cpu_has_veic) {
		/*
		 * Using platform specific interrupt controller defines.
		 */
		irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
	} else {
#endif
	if (cp0_perfcount_irq >= 0)
		irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
	else
		irq = -1;
#ifdef MSC01E_INT_BASE
	}
#endif

	mipspmu.map_raw_event = mipsxx_pmu_map_raw_event;

	switch (current_cpu_type()) {
	case CPU_24K:
		mipspmu.name = "mips/24K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_34K:
		mipspmu.name = "mips/34K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_74K:
		mipspmu.name = "mips/74K";
		mipspmu.general_event_map = &mipsxx74Kcore_event_map;
		mipspmu.cache_event_map = &mipsxx74Kcore_cache_map;
		break;
	case CPU_1004K:
		mipspmu.name = "mips/1004K";
		mipspmu.general_event_map = &mipsxxcore_event_map;
		mipspmu.cache_event_map = &mipsxxcore_cache_map;
		break;
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		mipspmu.name = "octeon";
		mipspmu.general_event_map = &octeon_event_map;
		mipspmu.cache_event_map = &octeon_cache_map;
		mipspmu.map_raw_event = octeon_pmu_map_raw_event;
		break;
	default:
		pr_cont("Either hardware does not support performance "
			"counters, or not yet implemented.\n");
		return -ENODEV;
	}

	mipspmu.num_counters = counters;
	mipspmu.irq = irq;

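	/*
	 * The M_PERFCTL_WIDE bit advertises 64-bit wide counters;
	 * without it the counters are 32 bits wide and overflow is
	 * signalled via bit 31.
	 */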
	if (read_c0_perfctrl0() & M_PERFCTL_WIDE) {
		mipspmu.max_period = (1ULL << 63) - 1;
		mipspmu.valid_count = (1ULL << 63) - 1;
		mipspmu.overflow = 1ULL << 63;
		mipspmu.read_counter = mipsxx_pmu_read_counter_64;
		mipspmu.write_counter = mipsxx_pmu_write_counter_64;
		counter_bits = 64;
	} else {
		mipspmu.max_period = (1ULL << 31) - 1;
		mipspmu.valid_count = (1ULL << 31) - 1;
		mipspmu.overflow = 1ULL << 31;
		mipspmu.read_counter = mipsxx_pmu_read_counter;
		mipspmu.write_counter = mipsxx_pmu_write_counter;
		counter_bits = 32;
	}

	on_each_cpu(reset_counters, (void *)(long)counters, 1);

	pr_cont("%s PMU enabled, %d %d-bit counters available to each "
		"CPU, irq %d%s\n", mipspmu.name, counters, counter_bits, irq,
		irq < 0 ? " (share with timer interrupt)" : "");

	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);

	return 0;
}
early_initcall(init_hw_perf_events);