1/*
2 * Linux performance counter support for MIPS.
3 *
4 * Copyright (C) 2010 MIPS Technologies, Inc.
5 * Author: Deng-Cheng Zhu
6 *
7 * This code is based on the implementation for ARM, which is in turn
8 * based on the sparc64 perf event code and the x86 code. Performance
9 * counter access is based on the MIPS Oprofile code. And the callchain
10 * support references the code of MIPS stacktrace.c.
11 *
12 * This program is free software; you can redistribute it and/or modify
13 * it under the terms of the GNU General Public License version 2 as
14 * published by the Free Software Foundation.
15 */
16
17#include <linux/cpumask.h>
18#include <linux/interrupt.h>
19#include <linux/smp.h>
20#include <linux/kernel.h>
21#include <linux/perf_event.h>
22#include <linux/uaccess.h>
23
24#include <asm/irq.h>
25#include <asm/irq_regs.h>
26#include <asm/stacktrace.h>
27#include <asm/time.h> /* For perf_irq */
28
29/* These are for 32bit counters. For 64bit ones, define them accordingly. */
30#define MAX_PERIOD ((1ULL << 32) - 1)
31#define VALID_COUNT 0x7fffffff
32#define TOTAL_BITS 32
33#define HIGHEST_BIT 31
34
35#define MIPS_MAX_HWEVENTS 4
36
37struct cpu_hw_events {
38 /* Array of events on this cpu. */
39 struct perf_event *events[MIPS_MAX_HWEVENTS];
40
41 /*
42 * Set the bit (indexed by the counter number) when the counter
43 * is used for an event.
44 */
45 unsigned long used_mask[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
46
47 /*
 48	 * The borrowed MSB for the performance counter. A MIPS performance
 49	 * counter uses its bit 31 (for 32bit counters) or bit 63 (for 64bit
 50	 * counters) as part of determining whether a counter overflow
 51	 * should be signaled. So here we use a separate MSB for each
 52	 * counter to keep things simple.
53 */
54 unsigned long msbs[BITS_TO_LONGS(MIPS_MAX_HWEVENTS)];
55
56 /*
57 * Software copy of the control register for each performance counter.
 58	 * MIPS CPUs vary in their performance counters; they may use this
 59	 * field differently, or not at all.
60 */
61 unsigned int saved_ctrl[MIPS_MAX_HWEVENTS];
62};
63DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
64 .saved_ctrl = {0},
65};
66
67/* The description of MIPS performance events. */
68struct mips_perf_event {
69 unsigned int event_id;
70 /*
71 * MIPS performance counters are indexed starting from 0.
 72	 * CNTR_EVEN indicates that the counters to be used have even
 73	 * indexes.
74 */
75 unsigned int cntr_mask;
76 #define CNTR_EVEN 0x55555555
77 #define CNTR_ODD 0xaaaaaaaa
78#ifdef CONFIG_MIPS_MT_SMP
79 enum {
80 T = 0,
81 V = 1,
82 P = 2,
83 } range;
84#else
85 #define T
86 #define V
87 #define P
88#endif
89};
90
91static struct mips_perf_event raw_event;
92static DEFINE_MUTEX(raw_event_mutex);
93
94#define UNSUPPORTED_PERF_EVENT_ID 0xffffffff
95#define C(x) PERF_COUNT_HW_CACHE_##x
96
97struct mips_pmu {
98 const char *name;
99 int irq;
100 irqreturn_t (*handle_irq)(int irq, void *dev);
101 int (*handle_shared_irq)(void);
102 void (*start)(void);
103 void (*stop)(void);
104 int (*alloc_counter)(struct cpu_hw_events *cpuc,
105 struct hw_perf_event *hwc);
106 u64 (*read_counter)(unsigned int idx);
107 void (*write_counter)(unsigned int idx, u64 val);
108 void (*enable_event)(struct hw_perf_event *evt, int idx);
109 void (*disable_event)(int idx);
110 const struct mips_perf_event *(*map_raw_event)(u64 config);
111 const struct mips_perf_event (*general_event_map)[PERF_COUNT_HW_MAX];
112 const struct mips_perf_event (*cache_event_map)
113 [PERF_COUNT_HW_CACHE_MAX]
114 [PERF_COUNT_HW_CACHE_OP_MAX]
115 [PERF_COUNT_HW_CACHE_RESULT_MAX];
116 unsigned int num_counters;
117};
118
119static const struct mips_pmu *mipspmu;
120
121static int mipspmu_event_set_period(struct perf_event *event,
122 struct hw_perf_event *hwc,
123 int idx)
124{
125 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
126 s64 left = local64_read(&hwc->period_left);
127 s64 period = hwc->sample_period;
128 int ret = 0;
129 u64 uleft;
130 unsigned long flags;
131
132 if (unlikely(left <= -period)) {
133 left = period;
134 local64_set(&hwc->period_left, left);
135 hwc->last_period = period;
136 ret = 1;
137 }
138
139 if (unlikely(left <= 0)) {
140 left += period;
141 local64_set(&hwc->period_left, left);
142 hwc->last_period = period;
143 ret = 1;
144 }
145
146 if (left > (s64)MAX_PERIOD)
147 left = MAX_PERIOD;
148
149 local64_set(&hwc->prev_count, (u64)-left);
150
151 local_irq_save(flags);
152 uleft = (u64)(-left) & MAX_PERIOD;
153 uleft > VALID_COUNT ?
154 set_bit(idx, cpuc->msbs) : clear_bit(idx, cpuc->msbs);
155 mipspmu->write_counter(idx, (u64)(-left) & VALID_COUNT);
156 local_irq_restore(flags);
157
158 perf_event_update_userpage(event);
159
160 return ret;
161}
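/*
 * A worked example of the borrowed-MSB scheme used above (the numbers
 * are arbitrary, chosen only for illustration): with left = 0x1000,
 * -left is 0xfffff000, which exceeds VALID_COUNT, so the msbs bit for
 * this counter is set and the hardware counter is loaded with
 * 0xfffff000 & VALID_COUNT = 0x7ffff000. After 0x1000 increments the
 * counter reaches 0x80000000; its bit 31 (the overflow flag) raises
 * the counter interrupt, and the saved msbs bit is what later allows
 * mipspmu_event_update() to reconstruct the full 32-bit value.
 */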
162
163static void mipspmu_event_update(struct perf_event *event,
164 struct hw_perf_event *hwc,
165 int idx)
166{
167 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
168 unsigned long flags;
169 int shift = 64 - TOTAL_BITS;
170 s64 prev_raw_count, new_raw_count;
171 u64 delta;
172
173again:
174 prev_raw_count = local64_read(&hwc->prev_count);
175 local_irq_save(flags);
 176	/* Reconstruct the "real" counter value using the borrowed MSB. */
177 new_raw_count = mipspmu->read_counter(idx);
178 if (new_raw_count & (test_bit(idx, cpuc->msbs) << HIGHEST_BIT)) {
179 new_raw_count &= VALID_COUNT;
180 clear_bit(idx, cpuc->msbs);
181 } else
182 new_raw_count |= (test_bit(idx, cpuc->msbs) << HIGHEST_BIT);
183 local_irq_restore(flags);
184
185 if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
186 new_raw_count) != prev_raw_count)
187 goto again;
188
189 delta = (new_raw_count << shift) - (prev_raw_count << shift);
190 delta >>= shift;
191
192 local64_add(delta, &event->count);
193 local64_sub(delta, &hwc->period_left);
194}
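/*
 * The shift by (64 - TOTAL_BITS) above computes the delta modulo
 * 2^TOTAL_BITS, so counter wrap-around is handled. A worked example
 * with arbitrary values: prev_raw_count = 0xfffffff0 and
 * new_raw_count = 0x00000010 give
 * ((new << 32) - (prev << 32)) >> 32 = 0x20, i.e. 32 new events.
 */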
195
196static void mipspmu_start(struct perf_event *event, int flags)
197{
198 struct hw_perf_event *hwc = &event->hw;
199
200 if (!mipspmu)
201 return;
202
203 if (flags & PERF_EF_RELOAD)
204 WARN_ON_ONCE(!(hwc->state & PERF_HES_UPTODATE));
205
206 hwc->state = 0;
207
208 /* Set the period for the event. */
209 mipspmu_event_set_period(event, hwc, hwc->idx);
210
211 /* Enable the event. */
212 mipspmu->enable_event(hwc, hwc->idx);
213}
214
215static void mipspmu_stop(struct perf_event *event, int flags)
216{
217 struct hw_perf_event *hwc = &event->hw;
218
219 if (!mipspmu)
220 return;
221
222 if (!(hwc->state & PERF_HES_STOPPED)) {
223 /* We are working on a local event. */
224 mipspmu->disable_event(hwc->idx);
225 barrier();
226 mipspmu_event_update(event, hwc, hwc->idx);
227 hwc->state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
228 }
229}
230
231static int mipspmu_add(struct perf_event *event, int flags)
232{
233 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
234 struct hw_perf_event *hwc = &event->hw;
235 int idx;
236 int err = 0;
237
238 perf_pmu_disable(event->pmu);
239
 240	/* Look for a free counter for this event. */
241 idx = mipspmu->alloc_counter(cpuc, hwc);
242 if (idx < 0) {
243 err = idx;
244 goto out;
245 }
246
247 /*
248 * If there is an event in the counter we are going to use then
249 * make sure it is disabled.
250 */
251 event->hw.idx = idx;
252 mipspmu->disable_event(idx);
253 cpuc->events[idx] = event;
254
255 hwc->state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
256 if (flags & PERF_EF_START)
257 mipspmu_start(event, PERF_EF_RELOAD);
258
259 /* Propagate our changes to the userspace mapping. */
260 perf_event_update_userpage(event);
261
262out:
263 perf_pmu_enable(event->pmu);
264 return err;
265}
266
267static void mipspmu_del(struct perf_event *event, int flags)
268{
269 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
270 struct hw_perf_event *hwc = &event->hw;
271 int idx = hwc->idx;
272
273 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
274
275 mipspmu_stop(event, PERF_EF_UPDATE);
276 cpuc->events[idx] = NULL;
277 clear_bit(idx, cpuc->used_mask);
278
279 perf_event_update_userpage(event);
280}
281
282static void mipspmu_read(struct perf_event *event)
283{
284 struct hw_perf_event *hwc = &event->hw;
285
286 /* Don't read disabled counters! */
287 if (hwc->idx < 0)
288 return;
289
290 mipspmu_event_update(event, hwc, hwc->idx);
291}
292
293static void mipspmu_enable(struct pmu *pmu)
294{
295 if (mipspmu)
296 mipspmu->start();
297}
298
299static void mipspmu_disable(struct pmu *pmu)
300{
301 if (mipspmu)
302 mipspmu->stop();
303}
304
305static atomic_t active_events = ATOMIC_INIT(0);
306static DEFINE_MUTEX(pmu_reserve_mutex);
307static int (*save_perf_irq)(void);
308
309static int mipspmu_get_irq(void)
310{
311 int err;
312
313 if (mipspmu->irq >= 0) {
314 /* Request my own irq handler. */
315 err = request_irq(mipspmu->irq, mipspmu->handle_irq,
316 IRQF_DISABLED | IRQF_NOBALANCING,
317 "mips_perf_pmu", NULL);
318 if (err) {
319 pr_warning("Unable to request IRQ%d for MIPS "
320 "performance counters!\n", mipspmu->irq);
321 }
322 } else if (cp0_perfcount_irq < 0) {
323 /*
324 * We are sharing the irq number with the timer interrupt.
325 */
326 save_perf_irq = perf_irq;
327 perf_irq = mipspmu->handle_shared_irq;
328 err = 0;
329 } else {
330 pr_warning("The platform hasn't properly defined its "
331 "interrupt controller.\n");
332 err = -ENOENT;
333 }
334
335 return err;
336}
337
338static void mipspmu_free_irq(void)
339{
340 if (mipspmu->irq >= 0)
341 free_irq(mipspmu->irq, NULL);
342 else if (cp0_perfcount_irq < 0)
343 perf_irq = save_perf_irq;
344}
345
346/*
 347 * mipsxx/rm9000/loongson2 have different performance counters; each has
 348 * its own specific low-level init routine.
349 */
350static void reset_counters(void *arg);
351static int __hw_perf_event_init(struct perf_event *event);
352
353static void hw_perf_event_destroy(struct perf_event *event)
354{
355 if (atomic_dec_and_mutex_lock(&active_events,
356 &pmu_reserve_mutex)) {
357 /*
358 * We must not call the destroy function with interrupts
359 * disabled.
360 */
361 on_each_cpu(reset_counters,
362 (void *)(long)mipspmu->num_counters, 1);
363 mipspmu_free_irq();
364 mutex_unlock(&pmu_reserve_mutex);
365 }
366}
367
368static int mipspmu_event_init(struct perf_event *event)
369{
370 int err = 0;
371
372 switch (event->attr.type) {
373 case PERF_TYPE_RAW:
374 case PERF_TYPE_HARDWARE:
375 case PERF_TYPE_HW_CACHE:
376 break;
377
378 default:
379 return -ENOENT;
380 }
381
382 if (!mipspmu || event->cpu >= nr_cpumask_bits ||
383 (event->cpu >= 0 && !cpu_online(event->cpu)))
384 return -ENODEV;
385
386 if (!atomic_inc_not_zero(&active_events)) {
387 if (atomic_read(&active_events) > MIPS_MAX_HWEVENTS) {
388 atomic_dec(&active_events);
389 return -ENOSPC;
390 }
391
392 mutex_lock(&pmu_reserve_mutex);
393 if (atomic_read(&active_events) == 0)
394 err = mipspmu_get_irq();
395
396 if (!err)
397 atomic_inc(&active_events);
398 mutex_unlock(&pmu_reserve_mutex);
399 }
400
401 if (err)
402 return err;
403
404 err = __hw_perf_event_init(event);
405 if (err)
406 hw_perf_event_destroy(event);
407
408 return err;
409}
410
411static struct pmu pmu = {
412 .pmu_enable = mipspmu_enable,
413 .pmu_disable = mipspmu_disable,
414 .event_init = mipspmu_event_init,
415 .add = mipspmu_add,
416 .del = mipspmu_del,
417 .start = mipspmu_start,
418 .stop = mipspmu_stop,
419 .read = mipspmu_read,
420};
421
422static unsigned int mipspmu_perf_event_encode(const struct mips_perf_event *pev)
423{
424/*
425 * Top 8 bits for range, next 16 bits for cntr_mask, lowest 8 bits for
426 * event_id.
427 */
428#ifdef CONFIG_MIPS_MT_SMP
429 return ((unsigned int)pev->range << 24) |
430 (pev->cntr_mask & 0xffff00) |
431 (pev->event_id & 0xff);
432#else
433 return (pev->cntr_mask & 0xffff00) |
434 (pev->event_id & 0xff);
435#endif
436}
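/*
 * For example, on a CONFIG_MIPS_MT_SMP kernel the generic branch event
 * { 0x02, CNTR_EVEN, T } encodes to 0x00555502: range T (0) in bits
 * 31-24, CNTR_EVEN & 0xffff00 = 0x555500 in bits 23-8 and event id
 * 0x02 in bits 7-0. mipsxx_pmu_alloc_counter() later recovers the
 * counter mask with (event_base >> 8) & 0xffff, and
 * mipsxx_pmu_enable_event() recovers the event id with
 * event_base & 0xff.
 */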
437
438static const struct mips_perf_event *mipspmu_map_general_event(int idx)
439{
440 const struct mips_perf_event *pev;
441
442 pev = ((*mipspmu->general_event_map)[idx].event_id ==
443 UNSUPPORTED_PERF_EVENT_ID ? ERR_PTR(-EOPNOTSUPP) :
444 &(*mipspmu->general_event_map)[idx]);
445
446 return pev;
447}
448
449static const struct mips_perf_event *mipspmu_map_cache_event(u64 config)
450{
451 unsigned int cache_type, cache_op, cache_result;
452 const struct mips_perf_event *pev;
453
454 cache_type = (config >> 0) & 0xff;
455 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
456 return ERR_PTR(-EINVAL);
457
458 cache_op = (config >> 8) & 0xff;
459 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
460 return ERR_PTR(-EINVAL);
461
462 cache_result = (config >> 16) & 0xff;
463 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
464 return ERR_PTR(-EINVAL);
465
466 pev = &((*mipspmu->cache_event_map)
467 [cache_type]
468 [cache_op]
469 [cache_result]);
470
471 if (pev->event_id == UNSUPPORTED_PERF_EVENT_ID)
472 return ERR_PTR(-EOPNOTSUPP);
473
474 return pev;
475
476}
477
478static int validate_event(struct cpu_hw_events *cpuc,
479 struct perf_event *event)
480{
481 struct hw_perf_event fake_hwc = event->hw;
482
 483	/* Allow mixed event groups, so return 1 to pass validation. */
484 if (event->pmu != &pmu || event->state <= PERF_EVENT_STATE_OFF)
485 return 1;
486
487 return mipspmu->alloc_counter(cpuc, &fake_hwc) >= 0;
488}
489
490static int validate_group(struct perf_event *event)
491{
492 struct perf_event *sibling, *leader = event->group_leader;
493 struct cpu_hw_events fake_cpuc;
494
495 memset(&fake_cpuc, 0, sizeof(fake_cpuc));
496
497 if (!validate_event(&fake_cpuc, leader))
498 return -ENOSPC;
499
500 list_for_each_entry(sibling, &leader->sibling_list, group_entry) {
501 if (!validate_event(&fake_cpuc, sibling))
502 return -ENOSPC;
503 }
504
505 if (!validate_event(&fake_cpuc, event))
506 return -ENOSPC;
507
508 return 0;
509}
510
511/* This is needed by specific irq handlers in perf_event_*.c */
512static void handle_associated_event(struct cpu_hw_events *cpuc,
513 int idx, struct perf_sample_data *data,
514 struct pt_regs *regs)
515{
516 struct perf_event *event = cpuc->events[idx];
517 struct hw_perf_event *hwc = &event->hw;
518
519 mipspmu_event_update(event, hwc, idx);
520 data->period = event->hw.last_period;
521 if (!mipspmu_event_set_period(event, hwc, idx))
522 return;
523
524 if (perf_event_overflow(event, data, regs))
525 mipspmu->disable_event(idx);
526}
 527
528#define M_CONFIG1_PC (1 << 4)
529
530#define M_PERFCTL_EXL (1UL << 0)
531#define M_PERFCTL_KERNEL (1UL << 1)
532#define M_PERFCTL_SUPERVISOR (1UL << 2)
533#define M_PERFCTL_USER (1UL << 3)
534#define M_PERFCTL_INTERRUPT_ENABLE (1UL << 4)
535#define M_PERFCTL_EVENT(event) (((event) & 0x3ff) << 5)
536#define M_PERFCTL_VPEID(vpe) ((vpe) << 16)
537#define M_PERFCTL_MT_EN(filter) ((filter) << 20)
538#define M_TC_EN_ALL M_PERFCTL_MT_EN(0)
539#define M_TC_EN_VPE M_PERFCTL_MT_EN(1)
540#define M_TC_EN_TC M_PERFCTL_MT_EN(2)
541#define M_PERFCTL_TCID(tcid) ((tcid) << 22)
542#define M_PERFCTL_WIDE (1UL << 30)
543#define M_PERFCTL_MORE (1UL << 31)
544
545#define M_PERFCTL_COUNT_EVENT_WHENEVER (M_PERFCTL_EXL | \
546 M_PERFCTL_KERNEL | \
547 M_PERFCTL_USER | \
548 M_PERFCTL_SUPERVISOR | \
549 M_PERFCTL_INTERRUPT_ENABLE)
550
551#ifdef CONFIG_MIPS_MT_SMP
552#define M_PERFCTL_CONFIG_MASK 0x3fff801f
553#else
554#define M_PERFCTL_CONFIG_MASK 0x1f
555#endif
556#define M_PERFCTL_EVENT_MASK 0xfe0
557
558#define M_COUNTER_OVERFLOW (1UL << 31)
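/*
 * As an illustration of how the control fields above combine
 * (hypothetical event number): counting event 0x0a in user mode only,
 * with the overflow interrupt enabled, corresponds to a control word of
 * M_PERFCTL_EVENT(0x0a) | M_PERFCTL_USER | M_PERFCTL_INTERRUPT_ENABLE
 * = (0x0a << 5) | (1 << 3) | (1 << 4) = 0x158.
 */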
559
560#ifdef CONFIG_MIPS_MT_SMP
561static int cpu_has_mipsmt_pertccounters;
562
563/*
564 * FIXME: For VSMP, vpe_id() is redefined for Perf-events, because
565 * cpu_data[cpuid].vpe_id reports 0 for _both_ CPUs.
566 */
567#if defined(CONFIG_HW_PERF_EVENTS)
568#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
569 0 : smp_processor_id())
570#else
571#define vpe_id() (cpu_has_mipsmt_pertccounters ? \
572 0 : cpu_data[smp_processor_id()].vpe_id)
573#endif
574
575/* Copied from op_model_mipsxx.c */
576static unsigned int vpe_shift(void)
577{
578 if (num_possible_cpus() > 1)
579 return 1;
580
581 return 0;
582}
 583
584static unsigned int counters_total_to_per_cpu(unsigned int counters)
585{
586 return counters >> vpe_shift();
587}
588
589static unsigned int counters_per_cpu_to_total(unsigned int counters)
590{
591 return counters << vpe_shift();
592}
593
594#else /* !CONFIG_MIPS_MT_SMP */
595#define vpe_id() 0
596
597#endif /* CONFIG_MIPS_MT_SMP */
598
599#define __define_perf_accessors(r, n, np)				\
600 \
601static unsigned int r_c0_ ## r ## n(void)			\
602{								\
603 unsigned int cpu = vpe_id(); \
604 \
605 switch (cpu) { \
606 case 0: \
607 return read_c0_ ## r ## n(); \
608 case 1: \
609 return read_c0_ ## r ## np(); \
610 default: \
611 BUG(); \
612 } \
613 return 0; \
614} \
615 \
616static void w_c0_ ## r ## n(unsigned int value)			\
617{								\
618 unsigned int cpu = vpe_id(); \
619 \
620 switch (cpu) { \
621 case 0: \
622 write_c0_ ## r ## n(value); \
623 return; \
624 case 1: \
625 write_c0_ ## r ## np(value); \
626 return; \
627 default: \
628 BUG(); \
629 } \
630 return; \
631} \
632
633__define_perf_accessors(perfcntr, 0, 2)
634__define_perf_accessors(perfcntr, 1, 3)
635__define_perf_accessors(perfcntr, 2, 0)
636__define_perf_accessors(perfcntr, 3, 1)
637
638__define_perf_accessors(perfctrl, 0, 2)
639__define_perf_accessors(perfctrl, 1, 3)
640__define_perf_accessors(perfctrl, 2, 0)
641__define_perf_accessors(perfctrl, 3, 1)
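/*
 * For instance, __define_perf_accessors(perfcntr, 0, 2) above generates
 * r_c0_perfcntr0()/w_c0_perfcntr0(), which access hardware counter 0
 * when running on VPE 0 but hardware counter 2 when running on VPE 1,
 * so each VPE sees its own pair of counters through the same index.
 */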
642
643static int __n_counters(void)
644{
645 if (!(read_c0_config1() & M_CONFIG1_PC))
646 return 0;
647 if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
648 return 1;
649 if (!(read_c0_perfctrl1() & M_PERFCTL_MORE))
650 return 2;
651 if (!(read_c0_perfctrl2() & M_PERFCTL_MORE))
652 return 3;
653
654 return 4;
655}
656
657static int n_counters(void)
658{
659 int counters;
660
661 switch (current_cpu_type()) {
662 case CPU_R10000:
663 counters = 2;
664 break;
665
666 case CPU_R12000:
667 case CPU_R14000:
668 counters = 4;
669 break;
670
671 default:
672 counters = __n_counters();
673 }
674
675 return counters;
676}
677
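/*
 * Note that the switch in reset_counters() below intentionally falls
 * through: when asked to reset N counters it clears counters N-1 down
 * to 0.
 */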
678static void reset_counters(void *arg)
679{
680 int counters = (int)(long)arg;
681 switch (counters) {
682 case 4:
683 w_c0_perfctrl3(0);
684 w_c0_perfcntr3(0);
685 case 3:
686 w_c0_perfctrl2(0);
687 w_c0_perfcntr2(0);
688 case 2:
689 w_c0_perfctrl1(0);
690 w_c0_perfcntr1(0);
691 case 1:
692 w_c0_perfctrl0(0);
693 w_c0_perfcntr0(0);
694 }
695}
696
697static u64 mipsxx_pmu_read_counter(unsigned int idx)
698{
699 switch (idx) {
700 case 0:
701 return r_c0_perfcntr0();
702 case 1:
703 return r_c0_perfcntr1();
704 case 2:
705 return r_c0_perfcntr2();
706 case 3:
707 return r_c0_perfcntr3();
708 default:
709 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
710 return 0;
711 }
712}
713
714static void mipsxx_pmu_write_counter(unsigned int idx, u64 val)
715{
716 switch (idx) {
717 case 0:
718 w_c0_perfcntr0(val);
719 return;
720 case 1:
721 w_c0_perfcntr1(val);
722 return;
723 case 2:
724 w_c0_perfcntr2(val);
725 return;
726 case 3:
727 w_c0_perfcntr3(val);
728 return;
729 }
730}
731
732static unsigned int mipsxx_pmu_read_control(unsigned int idx)
733{
734 switch (idx) {
735 case 0:
736 return r_c0_perfctrl0();
737 case 1:
738 return r_c0_perfctrl1();
739 case 2:
740 return r_c0_perfctrl2();
741 case 3:
742 return r_c0_perfctrl3();
743 default:
744 WARN_ONCE(1, "Invalid performance counter number (%d)\n", idx);
745 return 0;
746 }
747}
748
749static void mipsxx_pmu_write_control(unsigned int idx, unsigned int val)
750{
751 switch (idx) {
752 case 0:
753 w_c0_perfctrl0(val);
754 return;
755 case 1:
756 w_c0_perfctrl1(val);
757 return;
758 case 2:
759 w_c0_perfctrl2(val);
760 return;
761 case 3:
762 w_c0_perfctrl3(val);
763 return;
764 }
765}
766
767#ifdef CONFIG_MIPS_MT_SMP
768static DEFINE_RWLOCK(pmuint_rwlock);
769#endif
770
771/* 24K/34K/1004K cores can share the same event map. */
772static const struct mips_perf_event mipsxxcore_event_map
773 [PERF_COUNT_HW_MAX] = {
774 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
775 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
776 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
777 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
778 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x02, CNTR_EVEN, T },
779 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x02, CNTR_ODD, T },
780 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
781};
782
783/* 74K core has different branch event code. */
784static const struct mips_perf_event mipsxx74Kcore_event_map
785 [PERF_COUNT_HW_MAX] = {
786 [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD, P },
787 [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD, T },
788 [PERF_COUNT_HW_CACHE_REFERENCES] = { UNSUPPORTED_PERF_EVENT_ID },
789 [PERF_COUNT_HW_CACHE_MISSES] = { UNSUPPORTED_PERF_EVENT_ID },
790 [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x27, CNTR_EVEN, T },
791 [PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
792 [PERF_COUNT_HW_BUS_CYCLES] = { UNSUPPORTED_PERF_EVENT_ID },
793};
794
795/* 24K/34K/1004K cores can share the same cache event map. */
796static const struct mips_perf_event mipsxxcore_cache_map
797 [PERF_COUNT_HW_CACHE_MAX]
798 [PERF_COUNT_HW_CACHE_OP_MAX]
799 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
800[C(L1D)] = {
801 /*
802 * Like some other architectures (e.g. ARM), the performance
803 * counters don't differentiate between read and write
804 * accesses/misses, so this isn't strictly correct, but it's the
805 * best we can do. Writes and reads get combined.
806 */
807 [C(OP_READ)] = {
808 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
809 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
810 },
811 [C(OP_WRITE)] = {
812 [C(RESULT_ACCESS)] = { 0x0a, CNTR_EVEN, T },
813 [C(RESULT_MISS)] = { 0x0b, CNTR_EVEN | CNTR_ODD, T },
814 },
815 [C(OP_PREFETCH)] = {
816 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
817 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
818 },
819},
820[C(L1I)] = {
821 [C(OP_READ)] = {
822 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
823 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
824 },
825 [C(OP_WRITE)] = {
826 [C(RESULT_ACCESS)] = { 0x09, CNTR_EVEN, T },
827 [C(RESULT_MISS)] = { 0x09, CNTR_ODD, T },
828 },
829 [C(OP_PREFETCH)] = {
830 [C(RESULT_ACCESS)] = { 0x14, CNTR_EVEN, T },
831 /*
832 * Note that MIPS has only "hit" events countable for
833 * the prefetch operation.
834 */
835 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
836 },
837},
838[C(LL)] = {
839 [C(OP_READ)] = {
840 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
841 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
842 },
843 [C(OP_WRITE)] = {
844 [C(RESULT_ACCESS)] = { 0x15, CNTR_ODD, P },
845 [C(RESULT_MISS)] = { 0x16, CNTR_EVEN, P },
846 },
847 [C(OP_PREFETCH)] = {
848 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
849 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
850 },
851},
852[C(DTLB)] = {
853 [C(OP_READ)] = {
854 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
855 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
856 },
857 [C(OP_WRITE)] = {
858 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
859 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
860 },
861 [C(OP_PREFETCH)] = {
862 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
863 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
864 },
865},
866[C(ITLB)] = {
867 [C(OP_READ)] = {
868 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
869 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
870 },
871 [C(OP_WRITE)] = {
872 [C(RESULT_ACCESS)] = { 0x05, CNTR_EVEN, T },
873 [C(RESULT_MISS)] = { 0x05, CNTR_ODD, T },
874 },
875 [C(OP_PREFETCH)] = {
876 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
877 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
878 },
879},
880[C(BPU)] = {
881 /* Using the same code for *HW_BRANCH* */
882 [C(OP_READ)] = {
883 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
884 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
885 },
886 [C(OP_WRITE)] = {
887 [C(RESULT_ACCESS)] = { 0x02, CNTR_EVEN, T },
888 [C(RESULT_MISS)] = { 0x02, CNTR_ODD, T },
889 },
890 [C(OP_PREFETCH)] = {
891 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
892 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
893 },
894},
895[C(NODE)] = {
896 [C(OP_READ)] = {
897 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
898 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
899 },
900 [C(OP_WRITE)] = {
901 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
902 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
903 },
904 [C(OP_PREFETCH)] = {
905 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
906 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
907 },
908},
909};
910
911/* 74K core has completely different cache event map. */
912static const struct mips_perf_event mipsxx74Kcore_cache_map
913 [PERF_COUNT_HW_CACHE_MAX]
914 [PERF_COUNT_HW_CACHE_OP_MAX]
915 [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
916[C(L1D)] = {
917 /*
918 * Like some other architectures (e.g. ARM), the performance
919 * counters don't differentiate between read and write
920 * accesses/misses, so this isn't strictly correct, but it's the
921 * best we can do. Writes and reads get combined.
922 */
923 [C(OP_READ)] = {
924 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
925 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
926 },
927 [C(OP_WRITE)] = {
928 [C(RESULT_ACCESS)] = { 0x17, CNTR_ODD, T },
929 [C(RESULT_MISS)] = { 0x18, CNTR_ODD, T },
930 },
931 [C(OP_PREFETCH)] = {
932 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
933 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
934 },
935},
936[C(L1I)] = {
937 [C(OP_READ)] = {
938 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
939 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
940 },
941 [C(OP_WRITE)] = {
942 [C(RESULT_ACCESS)] = { 0x06, CNTR_EVEN, T },
943 [C(RESULT_MISS)] = { 0x06, CNTR_ODD, T },
944 },
945 [C(OP_PREFETCH)] = {
946 [C(RESULT_ACCESS)] = { 0x34, CNTR_EVEN, T },
947 /*
948 * Note that MIPS has only "hit" events countable for
949 * the prefetch operation.
950 */
951 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
952 },
953},
954[C(LL)] = {
955 [C(OP_READ)] = {
956 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
957 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
958 },
959 [C(OP_WRITE)] = {
960 [C(RESULT_ACCESS)] = { 0x1c, CNTR_ODD, P },
961 [C(RESULT_MISS)] = { 0x1d, CNTR_EVEN | CNTR_ODD, P },
962 },
963 [C(OP_PREFETCH)] = {
964 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
965 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
966 },
967},
968[C(DTLB)] = {
969 /* 74K core does not have specific DTLB events. */
970 [C(OP_READ)] = {
971 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
972 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
973 },
974 [C(OP_WRITE)] = {
975 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
976 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
977 },
978 [C(OP_PREFETCH)] = {
979 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
980 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
981 },
982},
983[C(ITLB)] = {
984 [C(OP_READ)] = {
985 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
986 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
987 },
988 [C(OP_WRITE)] = {
989 [C(RESULT_ACCESS)] = { 0x04, CNTR_EVEN, T },
990 [C(RESULT_MISS)] = { 0x04, CNTR_ODD, T },
991 },
992 [C(OP_PREFETCH)] = {
993 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
994 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
995 },
996},
997[C(BPU)] = {
998 /* Using the same code for *HW_BRANCH* */
999 [C(OP_READ)] = {
1000 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
1001 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
1002 },
1003 [C(OP_WRITE)] = {
1004 [C(RESULT_ACCESS)] = { 0x27, CNTR_EVEN, T },
1005 [C(RESULT_MISS)] = { 0x27, CNTR_ODD, T },
1006 },
1007 [C(OP_PREFETCH)] = {
1008 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1009 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1010 },
1011},
1012[C(NODE)] = {
1013 [C(OP_READ)] = {
1014 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1015 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1016 },
1017 [C(OP_WRITE)] = {
1018 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1019 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1020 },
1021 [C(OP_PREFETCH)] = {
1022 [C(RESULT_ACCESS)] = { UNSUPPORTED_PERF_EVENT_ID },
1023 [C(RESULT_MISS)] = { UNSUPPORTED_PERF_EVENT_ID },
1024 },
1025},
1026};
1027
1028#ifdef CONFIG_MIPS_MT_SMP
1029static void check_and_calc_range(struct perf_event *event,
1030 const struct mips_perf_event *pev)
1031{
1032 struct hw_perf_event *hwc = &event->hw;
1033
1034 if (event->cpu >= 0) {
1035 if (pev->range > V) {
1036 /*
1037 * The user selected an event that is processor
1038 * wide, while expecting it to be VPE wide.
1039 */
1040 hwc->config_base |= M_TC_EN_ALL;
1041 } else {
1042 /*
1043 * FIXME: cpu_data[event->cpu].vpe_id reports 0
1044 * for both CPUs.
1045 */
1046 hwc->config_base |= M_PERFCTL_VPEID(event->cpu);
1047 hwc->config_base |= M_TC_EN_VPE;
1048 }
1049 } else
1050 hwc->config_base |= M_TC_EN_ALL;
1051}
1052#else
1053static void check_and_calc_range(struct perf_event *event,
1054 const struct mips_perf_event *pev)
1055{
1056}
1057#endif
1058
1059static int __hw_perf_event_init(struct perf_event *event)
1060{
1061 struct perf_event_attr *attr = &event->attr;
1062 struct hw_perf_event *hwc = &event->hw;
1063 const struct mips_perf_event *pev;
1064 int err;
1065
 1066	/* Return the MIPS event descriptor for a generic perf event. */
1067 if (PERF_TYPE_HARDWARE == event->attr.type) {
1068 if (event->attr.config >= PERF_COUNT_HW_MAX)
1069 return -EINVAL;
1070 pev = mipspmu_map_general_event(event->attr.config);
1071 } else if (PERF_TYPE_HW_CACHE == event->attr.type) {
1072 pev = mipspmu_map_cache_event(event->attr.config);
1073 } else if (PERF_TYPE_RAW == event->attr.type) {
1074 /* We are working on the global raw event. */
1075 mutex_lock(&raw_event_mutex);
1076 pev = mipspmu->map_raw_event(event->attr.config);
1077 } else {
1078 /* The event type is not (yet) supported. */
1079 return -EOPNOTSUPP;
1080 }
1081
1082 if (IS_ERR(pev)) {
1083 if (PERF_TYPE_RAW == event->attr.type)
1084 mutex_unlock(&raw_event_mutex);
1085 return PTR_ERR(pev);
1086 }
1087
1088 /*
 1089	 * We allow maximum flexibility in how each individual counter shared
 1090	 * by a single CPU operates (the mode exclusion and the range).
1091 */
1092 hwc->config_base = M_PERFCTL_INTERRUPT_ENABLE;
1093
1094 /* Calculate range bits and validate it. */
1095 if (num_possible_cpus() > 1)
1096 check_and_calc_range(event, pev);
1097
1098 hwc->event_base = mipspmu_perf_event_encode(pev);
1099 if (PERF_TYPE_RAW == event->attr.type)
1100 mutex_unlock(&raw_event_mutex);
1101
1102 if (!attr->exclude_user)
1103 hwc->config_base |= M_PERFCTL_USER;
1104 if (!attr->exclude_kernel) {
1105 hwc->config_base |= M_PERFCTL_KERNEL;
1106 /* MIPS kernel mode: KSU == 00b || EXL == 1 || ERL == 1 */
1107 hwc->config_base |= M_PERFCTL_EXL;
1108 }
1109 if (!attr->exclude_hv)
1110 hwc->config_base |= M_PERFCTL_SUPERVISOR;
1111
1112 hwc->config_base &= M_PERFCTL_CONFIG_MASK;
1113 /*
1114 * The event can belong to another cpu. We do not assign a local
1115 * counter for it for now.
1116 */
1117 hwc->idx = -1;
1118 hwc->config = 0;
1119
1120 if (!hwc->sample_period) {
1121 hwc->sample_period = MAX_PERIOD;
1122 hwc->last_period = hwc->sample_period;
1123 local64_set(&hwc->period_left, hwc->sample_period);
1124 }
1125
1126 err = 0;
1127 if (event->group_leader != event) {
1128 err = validate_group(event);
1129 if (err)
1130 return -EINVAL;
1131 }
1132
1133 event->destroy = hw_perf_event_destroy;
1134
1135 return err;
1136}
1137
1138static void pause_local_counters(void)
1139{
1140 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1141 int counters = mipspmu->num_counters;
1142 unsigned long flags;
1143
1144 local_irq_save(flags);
1145 switch (counters) {
1146 case 4:
1147 cpuc->saved_ctrl[3] = r_c0_perfctrl3();
1148 w_c0_perfctrl3(cpuc->saved_ctrl[3] &
1149 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1150 case 3:
1151 cpuc->saved_ctrl[2] = r_c0_perfctrl2();
1152 w_c0_perfctrl2(cpuc->saved_ctrl[2] &
1153 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1154 case 2:
1155 cpuc->saved_ctrl[1] = r_c0_perfctrl1();
1156 w_c0_perfctrl1(cpuc->saved_ctrl[1] &
1157 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1158 case 1:
1159 cpuc->saved_ctrl[0] = r_c0_perfctrl0();
1160 w_c0_perfctrl0(cpuc->saved_ctrl[0] &
1161 ~M_PERFCTL_COUNT_EVENT_WHENEVER);
1162 }
1163 local_irq_restore(flags);
1164}
1165
1166static void resume_local_counters(void)
1167{
1168 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1169 int counters = mipspmu->num_counters;
1170 unsigned long flags;
1171
1172 local_irq_save(flags);
1173 switch (counters) {
1174 case 4:
1175 w_c0_perfctrl3(cpuc->saved_ctrl[3]);
1176 case 3:
1177 w_c0_perfctrl2(cpuc->saved_ctrl[2]);
1178 case 2:
1179 w_c0_perfctrl1(cpuc->saved_ctrl[1]);
1180 case 1:
1181 w_c0_perfctrl0(cpuc->saved_ctrl[0]);
1182 }
1183 local_irq_restore(flags);
1184}
1185
1186static int mipsxx_pmu_handle_shared_irq(void)
1187{
1188 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1189 struct perf_sample_data data;
1190 unsigned int counters = mipspmu->num_counters;
1191 unsigned int counter;
1192 int handled = IRQ_NONE;
1193 struct pt_regs *regs;
1194
1195 if (cpu_has_mips_r2 && !(read_c0_cause() & (1 << 26)))
1196 return handled;
1197
1198 /*
1199 * First we pause the local counters, so that when we are locked
1200 * here, the counters are all paused. When it gets locked due to
1201 * perf_disable(), the timer interrupt handler will be delayed.
1202 *
1203 * See also mipsxx_pmu_start().
1204 */
1205 pause_local_counters();
1206#ifdef CONFIG_MIPS_MT_SMP
1207 read_lock(&pmuint_rwlock);
1208#endif
1209
1210 regs = get_irq_regs();
1211
1212 perf_sample_data_init(&data, 0);
1213
1214 switch (counters) {
1215#define HANDLE_COUNTER(n) \
1216 case n + 1: \
1217 if (test_bit(n, cpuc->used_mask)) { \
1218 counter = r_c0_perfcntr ## n(); \
1219 if (counter & M_COUNTER_OVERFLOW) { \
1220 w_c0_perfcntr ## n(counter & \
1221 VALID_COUNT); \
1222 if (test_and_change_bit(n, cpuc->msbs)) \
1223 handle_associated_event(cpuc, \
1224 n, &data, regs); \
1225 handled = IRQ_HANDLED; \
1226 } \
1227 }
1228 HANDLE_COUNTER(3)
1229 HANDLE_COUNTER(2)
1230 HANDLE_COUNTER(1)
1231 HANDLE_COUNTER(0)
1232 }
1233
1234 /*
1235 * Do all the work for the pending perf events. We can do this
1236 * in here because the performance counter interrupt is a regular
1237 * interrupt, not NMI.
1238 */
1239 if (handled == IRQ_HANDLED)
1240		irq_work_run();
1241
1242#ifdef CONFIG_MIPS_MT_SMP
1243 read_unlock(&pmuint_rwlock);
1244#endif
1245 resume_local_counters();
1246 return handled;
1247}
1248
1249static irqreturn_t mipsxx_pmu_handle_irq(int irq, void *dev)
1250{
1251 return mipsxx_pmu_handle_shared_irq();
1252}
1253
1254static void mipsxx_pmu_start(void)
1255{
1256#ifdef CONFIG_MIPS_MT_SMP
1257 write_unlock(&pmuint_rwlock);
1258#endif
1259 resume_local_counters();
1260}
1261
1262/*
 1263 * MIPS performance counters can be per-TC. The control registers can
 1264 * not be directly accessed across CPUs. Hence if we want to do global
 1265 * control, we need cross-CPU calls. on_each_cpu() can help us, but we
 1266 * cannot make sure this function is called with interrupts enabled. So
 1267 * here we pause the local counters, then grab a rwlock and leave the
 1268 * counters on other CPUs alone. If a counter interrupt is raised while
 1269 * we own the write lock, simply pause the local counters on that CPU
 1270 * and spin in the handler. Also we know we won't be switched to
 1271 * another CPU after pausing local counters and before grabbing the lock.
1272 */
1273static void mipsxx_pmu_stop(void)
1274{
1275 pause_local_counters();
1276#ifdef CONFIG_MIPS_MT_SMP
1277 write_lock(&pmuint_rwlock);
1278#endif
1279}
1280
1281static int mipsxx_pmu_alloc_counter(struct cpu_hw_events *cpuc,
1282 struct hw_perf_event *hwc)
1283{
1284 int i;
1285
1286 /*
 1287	 * We only need to care about the counter mask. The range has
 1288	 * already been checked.
1289 */
1290 unsigned long cntr_mask = (hwc->event_base >> 8) & 0xffff;
1291
1292 for (i = mipspmu->num_counters - 1; i >= 0; i--) {
1293 /*
 1294	 * Note that some MIPS perf events can be counted by both
 1295	 * even and odd counters, whereas many others can be counted
 1296	 * only by even _or_ odd counters. This introduces an issue:
 1297	 * when the former kind of event takes a counter that the
 1298	 * latter kind of event wants to use, the "counter allocation"
 1299	 * for the latter event will fail. If the two could be swapped
 1300	 * dynamically, both would be satisfied. But here we leave this
 1301	 * issue alone for now.
1302 */
1303 if (test_bit(i, &cntr_mask) &&
1304 !test_and_set_bit(i, cpuc->used_mask))
1305 return i;
1306 }
1307
1308 return -EAGAIN;
1309}
1310
1311static void mipsxx_pmu_enable_event(struct hw_perf_event *evt, int idx)
1312{
1313 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1314 unsigned long flags;
1315
1316 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
1317
1318 local_irq_save(flags);
1319 cpuc->saved_ctrl[idx] = M_PERFCTL_EVENT(evt->event_base & 0xff) |
1320 (evt->config_base & M_PERFCTL_CONFIG_MASK) |
 1321		/* Make sure the interrupt is enabled. */
1322 M_PERFCTL_INTERRUPT_ENABLE;
1323 /*
1324 * We do not actually let the counter run. Leave it until start().
1325 */
1326 local_irq_restore(flags);
1327}
1328
1329static void mipsxx_pmu_disable_event(int idx)
1330{
1331 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1332 unsigned long flags;
1333
1334 WARN_ON(idx < 0 || idx >= mipspmu->num_counters);
1335
1336 local_irq_save(flags);
1337 cpuc->saved_ctrl[idx] = mipsxx_pmu_read_control(idx) &
1338 ~M_PERFCTL_COUNT_EVENT_WHENEVER;
1339 mipsxx_pmu_write_control(idx, cpuc->saved_ctrl[idx]);
1340 local_irq_restore(flags);
1341}
1342
1343/* 24K */
1344#define IS_UNSUPPORTED_24K_EVENT(r, b) \
1345 ((b) == 12 || (r) == 151 || (r) == 152 || (b) == 26 || \
1346 (b) == 27 || (r) == 28 || (r) == 158 || (b) == 31 || \
1347 (b) == 32 || (b) == 34 || (b) == 36 || (r) == 168 || \
1348 (r) == 172 || (b) == 47 || ((b) >= 56 && (b) <= 63) || \
1349 ((b) >= 68 && (b) <= 127))
1350#define IS_BOTH_COUNTERS_24K_EVENT(b) \
1351 ((b) == 0 || (b) == 1 || (b) == 11)
1352
1353/* 34K */
1354#define IS_UNSUPPORTED_34K_EVENT(r, b) \
1355 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 36 || \
1356 (b) == 38 || (r) == 175 || ((b) >= 56 && (b) <= 63) || \
1357 ((b) >= 68 && (b) <= 127))
1358#define IS_BOTH_COUNTERS_34K_EVENT(b) \
1359 ((b) == 0 || (b) == 1 || (b) == 11)
1360#ifdef CONFIG_MIPS_MT_SMP
1361#define IS_RANGE_P_34K_EVENT(r, b) \
1362 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
1363 (b) == 25 || (b) == 39 || (r) == 44 || (r) == 174 || \
1364 (r) == 176 || ((b) >= 50 && (b) <= 55) || \
1365 ((b) >= 64 && (b) <= 67))
1366#define IS_RANGE_V_34K_EVENT(r) ((r) == 47)
1367#endif
1368
1369/* 74K */
1370#define IS_UNSUPPORTED_74K_EVENT(r, b) \
1371 ((r) == 5 || ((r) >= 135 && (r) <= 137) || \
1372 ((b) >= 10 && (b) <= 12) || (b) == 22 || (b) == 27 || \
1373 (b) == 33 || (b) == 34 || ((b) >= 47 && (b) <= 49) || \
1374 (r) == 178 || (b) == 55 || (b) == 57 || (b) == 60 || \
1375 (b) == 61 || (r) == 62 || (r) == 191 || \
1376 ((b) >= 64 && (b) <= 127))
1377#define IS_BOTH_COUNTERS_74K_EVENT(b) \
1378 ((b) == 0 || (b) == 1)
1379
1380/* 1004K */
1381#define IS_UNSUPPORTED_1004K_EVENT(r, b) \
1382 ((b) == 12 || (r) == 27 || (r) == 158 || (b) == 38 || \
1383 (r) == 175 || (b) == 63 || ((b) >= 68 && (b) <= 127))
1384#define IS_BOTH_COUNTERS_1004K_EVENT(b) \
1385 ((b) == 0 || (b) == 1 || (b) == 11)
1386#ifdef CONFIG_MIPS_MT_SMP
1387#define IS_RANGE_P_1004K_EVENT(r, b) \
1388 ((b) == 0 || (r) == 18 || (b) == 21 || (b) == 22 || \
1389 (b) == 25 || (b) == 36 || (b) == 39 || (r) == 44 || \
1390 (r) == 174 || (r) == 176 || ((b) >= 50 && (b) <= 59) || \
1391 (r) == 188 || (b) == 61 || (b) == 62 || \
1392 ((b) >= 64 && (b) <= 67))
1393#define IS_RANGE_V_1004K_EVENT(r) ((r) == 47)
1394#endif
1395
1396/*
 1397 * Users can pass raw event numbers 0-255, where 0-127 select events
 1398 * on even counters and 128-255 select events on odd counters; bit 7
 1399 * thus indicates the parity. For example, to count Event Num 15 (from
 1400 * the user manual) on an odd counter, 128 must be added to 15, so the
 1401 * event config to use is 143 (0x8F), i.e. base event 15 with the odd
 1402 * bit set.
1403 */
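/*
 * With the perf tool this corresponds to the raw event syntax; for
 * example (assuming a supported core) something like "perf stat -e r8f"
 * should request base event 15 on an odd counter via the mapping below.
 */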
1404static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
1405{
1406 unsigned int raw_id = config & 0xff;
1407 unsigned int base_id = raw_id & 0x7f;
1408
1409 switch (current_cpu_type()) {
1410 case CPU_24K:
1411 if (IS_UNSUPPORTED_24K_EVENT(raw_id, base_id))
1412 return ERR_PTR(-EOPNOTSUPP);
1413 raw_event.event_id = base_id;
1414 if (IS_BOTH_COUNTERS_24K_EVENT(base_id))
1415 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1416 else
1417 raw_event.cntr_mask =
1418 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1419#ifdef CONFIG_MIPS_MT_SMP
1420 /*
 1421		 * This actually does nothing: non-multithreading CPUs will
 1422		 * not check and calculate the range.
1423 */
1424 raw_event.range = P;
1425#endif
1426 break;
1427 case CPU_34K:
1428 if (IS_UNSUPPORTED_34K_EVENT(raw_id, base_id))
1429 return ERR_PTR(-EOPNOTSUPP);
1430 raw_event.event_id = base_id;
1431 if (IS_BOTH_COUNTERS_34K_EVENT(base_id))
1432 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1433 else
1434 raw_event.cntr_mask =
1435 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1436#ifdef CONFIG_MIPS_MT_SMP
1437 if (IS_RANGE_P_34K_EVENT(raw_id, base_id))
1438 raw_event.range = P;
1439 else if (unlikely(IS_RANGE_V_34K_EVENT(raw_id)))
1440 raw_event.range = V;
1441 else
1442 raw_event.range = T;
1443#endif
1444 break;
1445 case CPU_74K:
1446 if (IS_UNSUPPORTED_74K_EVENT(raw_id, base_id))
1447 return ERR_PTR(-EOPNOTSUPP);
1448 raw_event.event_id = base_id;
1449 if (IS_BOTH_COUNTERS_74K_EVENT(base_id))
1450 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1451 else
1452 raw_event.cntr_mask =
1453 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1454#ifdef CONFIG_MIPS_MT_SMP
1455 raw_event.range = P;
1456#endif
1457 break;
1458 case CPU_1004K:
1459 if (IS_UNSUPPORTED_1004K_EVENT(raw_id, base_id))
1460 return ERR_PTR(-EOPNOTSUPP);
1461 raw_event.event_id = base_id;
1462 if (IS_BOTH_COUNTERS_1004K_EVENT(base_id))
1463 raw_event.cntr_mask = CNTR_EVEN | CNTR_ODD;
1464 else
1465 raw_event.cntr_mask =
1466 raw_id > 127 ? CNTR_ODD : CNTR_EVEN;
1467#ifdef CONFIG_MIPS_MT_SMP
1468 if (IS_RANGE_P_1004K_EVENT(raw_id, base_id))
1469 raw_event.range = P;
1470 else if (unlikely(IS_RANGE_V_1004K_EVENT(raw_id)))
1471 raw_event.range = V;
1472 else
1473 raw_event.range = T;
1474#endif
1475 break;
1476 }
1477
1478 return &raw_event;
1479}
1480
1481static struct mips_pmu mipsxxcore_pmu = {
1482 .handle_irq = mipsxx_pmu_handle_irq,
1483 .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
1484 .start = mipsxx_pmu_start,
1485 .stop = mipsxx_pmu_stop,
1486 .alloc_counter = mipsxx_pmu_alloc_counter,
1487 .read_counter = mipsxx_pmu_read_counter,
1488 .write_counter = mipsxx_pmu_write_counter,
1489 .enable_event = mipsxx_pmu_enable_event,
1490 .disable_event = mipsxx_pmu_disable_event,
1491 .map_raw_event = mipsxx_pmu_map_raw_event,
1492 .general_event_map = &mipsxxcore_event_map,
1493 .cache_event_map = &mipsxxcore_cache_map,
1494};
1495
1496static struct mips_pmu mipsxx74Kcore_pmu = {
1497 .handle_irq = mipsxx_pmu_handle_irq,
1498 .handle_shared_irq = mipsxx_pmu_handle_shared_irq,
1499 .start = mipsxx_pmu_start,
1500 .stop = mipsxx_pmu_stop,
1501 .alloc_counter = mipsxx_pmu_alloc_counter,
1502 .read_counter = mipsxx_pmu_read_counter,
1503 .write_counter = mipsxx_pmu_write_counter,
1504 .enable_event = mipsxx_pmu_enable_event,
1505 .disable_event = mipsxx_pmu_disable_event,
1506 .map_raw_event = mipsxx_pmu_map_raw_event,
1507 .general_event_map = &mipsxx74Kcore_event_map,
1508 .cache_event_map = &mipsxx74Kcore_cache_map,
1509};
1510
1511static int __init
1512init_hw_perf_events(void)
1513{
1514 int counters, irq;
1515
1516 pr_info("Performance counters: ");
1517
1518 counters = n_counters();
1519 if (counters == 0) {
1520 pr_cont("No available PMU.\n");
1521 return -ENODEV;
1522 }
1523
1524#ifdef CONFIG_MIPS_MT_SMP
1525 cpu_has_mipsmt_pertccounters = read_c0_config7() & (1<<19);
1526 if (!cpu_has_mipsmt_pertccounters)
1527 counters = counters_total_to_per_cpu(counters);
1528#endif
1529
1530#ifdef MSC01E_INT_BASE
1531 if (cpu_has_veic) {
1532 /*
1533 * Using platform specific interrupt controller defines.
1534 */
1535 irq = MSC01E_INT_BASE + MSC01E_INT_PERFCTR;
1536 } else {
1537#endif
1538 if (cp0_perfcount_irq >= 0)
1539 irq = MIPS_CPU_IRQ_BASE + cp0_perfcount_irq;
1540 else
1541 irq = -1;
1542#ifdef MSC01E_INT_BASE
1543 }
1544#endif
1545
1546 on_each_cpu(reset_counters, (void *)(long)counters, 1);
1547
1548 switch (current_cpu_type()) {
1549 case CPU_24K:
1550 mipsxxcore_pmu.name = "mips/24K";
1551 mipsxxcore_pmu.num_counters = counters;
1552 mipsxxcore_pmu.irq = irq;
1553 mipspmu = &mipsxxcore_pmu;
1554 break;
1555 case CPU_34K:
1556 mipsxxcore_pmu.name = "mips/34K";
1557 mipsxxcore_pmu.num_counters = counters;
1558 mipsxxcore_pmu.irq = irq;
1559 mipspmu = &mipsxxcore_pmu;
1560 break;
1561 case CPU_74K:
1562 mipsxx74Kcore_pmu.name = "mips/74K";
1563 mipsxx74Kcore_pmu.num_counters = counters;
1564 mipsxx74Kcore_pmu.irq = irq;
1565 mipspmu = &mipsxx74Kcore_pmu;
1566 break;
1567 case CPU_1004K:
1568 mipsxxcore_pmu.name = "mips/1004K";
1569 mipsxxcore_pmu.num_counters = counters;
1570 mipsxxcore_pmu.irq = irq;
1571 mipspmu = &mipsxxcore_pmu;
1572 break;
1573 default:
1574 pr_cont("Either hardware does not support performance "
1575 "counters, or not yet implemented.\n");
1576 return -ENODEV;
1577 }
1578
1579 if (mipspmu)
1580 pr_cont("%s PMU enabled, %d counters available to each "
1581 "CPU, irq %d%s\n", mipspmu->name, counters, irq,
1582 irq < 0 ? " (share with timer interrupt)" : "");
1583
1584	perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1585
1586	return 0;
1587}
1588early_initcall(init_hw_perf_events);