/*
 * Performance event support - powerpc architecture code
 *
 * Copyright 2008-2009 Paul Mackerras, IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/perf_event.h>
#include <linux/percpu.h>
#include <linux/hardirq.h>
#include <linux/uaccess.h>
#include <asm/reg.h>
#include <asm/pmc.h>
#include <asm/machdep.h>
#include <asm/firmware.h>
#include <asm/ptrace.h>
#include <asm/code-patching.h>

#define BHRB_MAX_ENTRIES	32
#define BHRB_TARGET		0x0000000000000002
#define BHRB_PREDICTION		0x0000000000000001
#define BHRB_EA			0xFFFFFFFFFFFFFFFC

struct cpu_hw_events {
	int n_events;
	int n_percpu;
	int disabled;
	int n_added;
	int n_limited;
	u8 pmcs_enabled;
	struct perf_event *event[MAX_HWEVENTS];
	u64 events[MAX_HWEVENTS];
	unsigned int flags[MAX_HWEVENTS];
	unsigned long mmcr[3];
	struct perf_event *limited_counter[MAX_LIMITED_HWCOUNTERS];
	u8 limited_hwidx[MAX_LIMITED_HWCOUNTERS];
	u64 alternatives[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long amasks[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];
	unsigned long avalues[MAX_HWEVENTS][MAX_EVENT_ALTERNATIVES];

	unsigned int group_flag;
	int n_txn_start;

	/* BHRB bits */
	u64 bhrb_filter;	/* BHRB HW branch filter */
	int bhrb_users;
	void *bhrb_context;
	struct perf_branch_stack bhrb_stack;
	struct perf_branch_entry bhrb_entries[BHRB_MAX_ENTRIES];
};

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events);

struct power_pmu *ppmu;

/*
 * Normally, to ignore kernel events we set the FCS (freeze counters
 * in supervisor mode) bit in MMCR0, but if the kernel runs with the
 * hypervisor bit set in the MSR, or if we are running on a processor
 * where the hypervisor bit is forced to 1 (as on Apple G5 processors),
 * then we need to use the FCHV bit to ignore kernel events.
 */
static unsigned int freeze_events_kernel = MMCR0_FCS;

/*
 * 32-bit doesn't have MMCRA but does have an MMCR2,
 * and a few other names are different.
 */
#ifdef CONFIG_PPC32

#define MMCR0_FCHV		0
#define MMCR0_PMCjCE		MMCR0_PMCnCE

#define SPRN_MMCRA		SPRN_MMCR2
#define MMCRA_SAMPLE_ENABLE	0

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp) { }
static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	return 0;
}
static inline void perf_read_regs(struct pt_regs *regs)
{
	regs->result = 0;
}
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return 0;
}

static inline int siar_valid(struct pt_regs *regs)
{
	return 1;
}

static inline void power_pmu_bhrb_enable(struct perf_event *event) {}
static inline void power_pmu_bhrb_disable(struct perf_event *event) {}
void power_pmu_flush_branch_stack(void) {}
static inline void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw) {}
#endif /* CONFIG_PPC32 */

static bool regs_use_siar(struct pt_regs *regs)
{
	return !!(regs->result & 1);
}

/*
 * Things that are specific to 64-bit implementations.
 */
#ifdef CONFIG_PPC64

static inline unsigned long perf_ip_adjust(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;

	if ((ppmu->flags & PPMU_HAS_SSLOT) && (mmcra & MMCRA_SAMPLE_ENABLE)) {
		unsigned long slot = (mmcra & MMCRA_SLOT) >> MMCRA_SLOT_SHIFT;
		if (slot > 1)
			return 4 * (slot - 1);
	}

	return 0;
}

/*
 * The user wants a data address recorded.
 * If we're not doing instruction sampling, give them the SDAR
 * (sampled data address).  If we are doing instruction sampling, then
 * only give them the SDAR if it corresponds to the instruction
 * pointed to by SIAR; this is indicated by the [POWER6_]MMCRA_SDSYNC or
 * the [POWER7P_]MMCRA_SDAR_VALID bit in MMCRA.
 */
static inline void perf_get_data_addr(struct pt_regs *regs, u64 *addrp)
{
	unsigned long mmcra = regs->dsisr;
	unsigned long sdsync;

	if (ppmu->flags & PPMU_SIAR_VALID)
		sdsync = POWER7P_MMCRA_SDAR_VALID;
	else if (ppmu->flags & PPMU_ALT_SIPR)
		sdsync = POWER6_MMCRA_SDSYNC;
	else
		sdsync = MMCRA_SDSYNC;

	if (!(mmcra & MMCRA_SAMPLE_ENABLE) || (mmcra & sdsync))
		*addrp = mfspr(SPRN_SDAR);
}

static bool regs_sihv(struct pt_regs *regs)
{
	unsigned long sihv = MMCRA_SIHV;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIHV);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sihv = POWER6_MMCRA_SIHV;

	return !!(regs->dsisr & sihv);
}

static bool regs_sipr(struct pt_regs *regs)
{
	unsigned long sipr = MMCRA_SIPR;

	if (ppmu->flags & PPMU_HAS_SIER)
		return !!(regs->dar & SIER_SIPR);

	if (ppmu->flags & PPMU_ALT_SIPR)
		sipr = POWER6_MMCRA_SIPR;

	return !!(regs->dsisr & sipr);
}

static bool regs_no_sipr(struct pt_regs *regs)
{
	return !!(regs->result & 2);
}
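
/*
 * Note on the encoding used by the helpers above: perf_read_regs()
 * (below) overloads regs->result as a small flag word.  Bit 0 means
 * "use SIAR rather than regs->nip" (tested by regs_use_siar()) and
 * bit 1 means "no usable SIPR/SIHV information" (tested by
 * regs_no_sipr()), so the PMU interrupt path decodes MMCRA/SIER once
 * and reuses the answers.
 */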

static inline u32 perf_flags_from_msr(struct pt_regs *regs)
{
	if (regs->msr & MSR_PR)
		return PERF_RECORD_MISC_USER;
	if ((regs->msr & MSR_HV) && freeze_events_kernel != MMCR0_FCHV)
		return PERF_RECORD_MISC_HYPERVISOR;
	return PERF_RECORD_MISC_KERNEL;
}

static inline u32 perf_get_misc_flags(struct pt_regs *regs)
{
	bool use_siar = regs_use_siar(regs);

	if (!use_siar)
		return perf_flags_from_msr(regs);

	/*
	 * If we don't have flags in MMCRA, rather than using
	 * the MSR, we intuit the flags from the address in
	 * SIAR which should give slightly more reliable
	 * results
	 */
	if (regs_no_sipr(regs)) {
		unsigned long siar = mfspr(SPRN_SIAR);
		if (siar >= PAGE_OFFSET)
			return PERF_RECORD_MISC_KERNEL;
		return PERF_RECORD_MISC_USER;
	}

	/* PR has priority over HV, so order below is important */
	if (regs_sipr(regs))
		return PERF_RECORD_MISC_USER;

	if (regs_sihv(regs) && (freeze_events_kernel != MMCR0_FCHV))
		return PERF_RECORD_MISC_HYPERVISOR;

	return PERF_RECORD_MISC_KERNEL;
}

/*
 * Overload regs->dsisr to store MMCRA so we only need to read it once
 * on each interrupt.
 * Overload regs->dar to store SIER if we have it.
 * Overload regs->result to specify whether we should use the MSR (result
 * is zero) or the SIAR (result is non zero).
 */
static inline void perf_read_regs(struct pt_regs *regs)
{
	unsigned long mmcra = mfspr(SPRN_MMCRA);
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;
	int use_siar;

	regs->dsisr = mmcra;
	regs->result = 0;

	if (ppmu->flags & PPMU_NO_SIPR)
		regs->result |= 2;

	/*
	 * On power8 if we're in random sampling mode, the SIER is updated.
	 * If we're in continuous sampling mode, we don't have SIPR.
	 */
	if (ppmu->flags & PPMU_HAS_SIER) {
		if (marked)
			regs->dar = mfspr(SPRN_SIER);
		else
			regs->result |= 2;
	}

	/*
	 * If this isn't a PMU exception (eg a software event) the SIAR is
	 * not valid. Use pt_regs.
	 *
	 * If it is a marked event use the SIAR.
	 *
	 * If the PMU doesn't update the SIAR for non marked events use
	 * pt_regs.
	 *
	 * If the PMU has HV/PR flags then check to see if they
	 * place the exception in userspace. If so, use pt_regs. In
	 * continuous sampling mode the SIAR and the PMU exception are
	 * not synchronised, so they may be many instructions apart.
	 * This can result in confusing backtraces. We still want
	 * hypervisor samples as well as samples in the kernel with
	 * interrupts off hence the userspace check.
	 */
	if (TRAP(regs) != 0xf00)
		use_siar = 0;
	else if (marked)
		use_siar = 1;
	else if ((ppmu->flags & PPMU_NO_CONT_SAMPLING))
		use_siar = 0;
	else if (!regs_no_sipr(regs) && regs_sipr(regs))
		use_siar = 0;
	else
		use_siar = 1;

	regs->result |= use_siar;
}

/*
 * If interrupts were soft-disabled when a PMU interrupt occurs, treat
 * it as an NMI.
 */
static inline int perf_intr_is_nmi(struct pt_regs *regs)
{
	return !regs->softe;
}

/*
 * On processors like P7+ that have the SIAR-Valid bit, marked instructions
 * must be sampled only if the SIAR-valid bit is set.
 *
 * For unmarked instructions and for processors that don't have the SIAR-Valid
 * bit, assume that SIAR is valid.
 */
static inline int siar_valid(struct pt_regs *regs)
{
	unsigned long mmcra = regs->dsisr;
	int marked = mmcra & MMCRA_SAMPLE_ENABLE;

	if ((ppmu->flags & PPMU_SIAR_VALID) && marked)
		return mmcra & POWER7P_MMCRA_SIAR_VALID;

	return 1;
}

/* Reset all possible BHRB entries */
static void power_pmu_bhrb_reset(void)
{
	asm volatile(PPC_CLRBHRB);
}

static void power_pmu_bhrb_enable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	/* Clear BHRB if we changed task context to avoid data leaks */
	if (event->ctx->task && cpuhw->bhrb_context != event->ctx) {
		power_pmu_bhrb_reset();
		cpuhw->bhrb_context = event->ctx;
	}
	cpuhw->bhrb_users++;
}

static void power_pmu_bhrb_disable(struct perf_event *event)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!ppmu->bhrb_nr)
		return;

	cpuhw->bhrb_users--;
	WARN_ON_ONCE(cpuhw->bhrb_users < 0);

	if (!cpuhw->disabled && !cpuhw->bhrb_users) {
		/*
		 * BHRB cannot be turned off when other
		 * events are active on the PMU.
		 */

		/* avoid stale pointer */
		cpuhw->bhrb_context = NULL;
	}
}

/*
 * Called from ctxsw to prevent one process's branch entries from
 * mingling with another process's entries during a context switch.
 */
void power_pmu_flush_branch_stack(void)
{
	if (ppmu->bhrb_nr)
		power_pmu_bhrb_reset();
}

/* Calculate the "to" address for a branch */
static __u64 power_pmu_bhrb_to(u64 addr)
{
	unsigned int instr;
	int ret;
	__u64 target;

	if (is_kernel_addr(addr))
		return branch_target((unsigned int *)addr);

	/* Userspace: need to copy the instruction here, then translate it */
	pagefault_disable();
	ret = __get_user_inatomic(instr, (unsigned int __user *)addr);
	if (ret) {
		pagefault_enable();
		return 0;
	}
	pagefault_enable();

	target = branch_target(&instr);
	if ((!target) || (instr & BRANCH_ABSOLUTE))
		return target;

	/* Translate relative branch target from kernel to user address */
	return target - (unsigned long)&instr + addr;
}
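
/*
 * Illustrative note: for a user-space relative branch at address addr,
 * the instruction is copied into the local variable 'instr', so
 * branch_target(&instr) yields a target relative to that kernel copy.
 * The final statement above rebases it into the user address space by
 * subtracting &instr and adding the original addr.
 */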

/* Processing BHRB entries */
void power_pmu_bhrb_read(struct cpu_hw_events *cpuhw)
{
	u64 val;
	u64 addr;
	int r_index, u_index, pred;

	r_index = 0;
	u_index = 0;
	while (r_index < ppmu->bhrb_nr) {
		/* Assembly read function */
		val = read_bhrb(r_index++);
		if (!val)
			/* Terminal marker: end of valid BHRB entries */
			break;
		else {
			addr = val & BHRB_EA;
			pred = val & BHRB_PREDICTION;

			if (!addr)
				/* invalid entry */
				continue;

			/*
			 * Branches are read most recent first (ie. mfbhrb 0 is
			 * the most recent branch).
			 * There are two types of valid entries:
			 * 1) a target entry, which is the "to" address of a
			 *    computed goto like a blr, bctr or btar.  The next
			 *    entry read from the bhrb will be the branch
			 *    corresponding to this target (ie. the actual
			 *    blr/bctr/btar instruction).
			 * 2) a from address, which is an actual branch.  If a
			 *    target entry precedes this, then this is the
			 *    matching branch for that target.  If this is not
			 *    following a target entry, then this is a branch
			 *    where the target is given as an immediate field
			 *    in the instruction (ie. an i or b form branch).
			 *    In this case we need to read the instruction from
			 *    memory to determine the target/to address.
			 */

			if (val & BHRB_TARGET) {
				/*
				 * Target branches use two entries
				 * (ie. computed gotos/XL form)
				 */
				cpuhw->bhrb_entries[u_index].to = addr;
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;

				/* Get from address in next entry */
				val = read_bhrb(r_index++);
				addr = val & BHRB_EA;
				if (val & BHRB_TARGET) {
					/*
					 * Shouldn't have two targets in a
					 * row.  Reset index and try again.
					 */
					r_index--;
					addr = 0;
				}
				cpuhw->bhrb_entries[u_index].from = addr;
			} else {
				/*
				 * Branches to immediate field
				 * (ie I or B form)
				 */
				cpuhw->bhrb_entries[u_index].from = addr;
				cpuhw->bhrb_entries[u_index].to =
					power_pmu_bhrb_to(addr);
				cpuhw->bhrb_entries[u_index].mispred = pred;
				cpuhw->bhrb_entries[u_index].predicted = ~pred;
			}
			u_index++;

		}
	}
	cpuhw->bhrb_stack.nr = u_index;
	return;
}
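
/*
 * Illustrative summary of the loop above: a computed goto produces two
 * raw BHRB entries - a BHRB_TARGET entry carrying the "to" address
 * followed by the branch itself carrying the "from" address - and the
 * pair is folded into a single perf_branch_entry.  An immediate-form
 * branch produces one raw entry; its "to" address is reconstructed by
 * power_pmu_bhrb_to() reading the branch instruction itself.
 */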

#endif /* CONFIG_PPC64 */

static void perf_event_interrupt(struct pt_regs *regs);

void perf_event_print_debug(void)
{
}

/*
 * Read one performance monitor counter (PMC).
 */
static unsigned long read_pmc(int idx)
{
	unsigned long val;

	switch (idx) {
	case 1:
		val = mfspr(SPRN_PMC1);
		break;
	case 2:
		val = mfspr(SPRN_PMC2);
		break;
	case 3:
		val = mfspr(SPRN_PMC3);
		break;
	case 4:
		val = mfspr(SPRN_PMC4);
		break;
	case 5:
		val = mfspr(SPRN_PMC5);
		break;
	case 6:
		val = mfspr(SPRN_PMC6);
		break;
#ifdef CONFIG_PPC64
	case 7:
		val = mfspr(SPRN_PMC7);
		break;
	case 8:
		val = mfspr(SPRN_PMC8);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to read PMC%d\n", idx);
		val = 0;
	}
	return val;
}

/*
 * Write one PMC.
 */
static void write_pmc(int idx, unsigned long val)
{
	switch (idx) {
	case 1:
		mtspr(SPRN_PMC1, val);
		break;
	case 2:
		mtspr(SPRN_PMC2, val);
		break;
	case 3:
		mtspr(SPRN_PMC3, val);
		break;
	case 4:
		mtspr(SPRN_PMC4, val);
		break;
	case 5:
		mtspr(SPRN_PMC5, val);
		break;
	case 6:
		mtspr(SPRN_PMC6, val);
		break;
#ifdef CONFIG_PPC64
	case 7:
		mtspr(SPRN_PMC7, val);
		break;
	case 8:
		mtspr(SPRN_PMC8, val);
		break;
#endif /* CONFIG_PPC64 */
	default:
		printk(KERN_ERR "oops trying to write PMC%d\n", idx);
	}
}

/*
 * Check if a set of events can all go on the PMU at once.
 * If they can't, this will look at alternative codes for the events
 * and see if any combination of alternative codes is feasible.
 * The feasible set is returned in event_id[].
 */
static int power_check_constraints(struct cpu_hw_events *cpuhw,
				   u64 event_id[], unsigned int cflags[],
				   int n_ev)
{
	unsigned long mask, value, nv;
	unsigned long smasks[MAX_HWEVENTS], svalues[MAX_HWEVENTS];
	int n_alt[MAX_HWEVENTS], choice[MAX_HWEVENTS];
	int i, j;
	unsigned long addf = ppmu->add_fields;
	unsigned long tadd = ppmu->test_adder;

	if (n_ev > ppmu->n_counter)
		return -1;

	/* First see if the events will go on as-is */
	for (i = 0; i < n_ev; ++i) {
		if ((cflags[i] & PPMU_LIMITED_PMC_REQD)
		    && !ppmu->limited_pmc_event(event_id[i])) {
			ppmu->get_alternatives(event_id[i], cflags[i],
					       cpuhw->alternatives[i]);
			event_id[i] = cpuhw->alternatives[i][0];
		}
		if (ppmu->get_constraint(event_id[i], &cpuhw->amasks[i][0],
					 &cpuhw->avalues[i][0]))
			return -1;
	}
	value = mask = 0;
	for (i = 0; i < n_ev; ++i) {
		nv = (value | cpuhw->avalues[i][0]) +
			(value & cpuhw->avalues[i][0] & addf);
		if ((((nv + tadd) ^ value) & mask) != 0 ||
		    (((nv + tadd) ^ cpuhw->avalues[i][0]) &
		     cpuhw->amasks[i][0]) != 0)
			break;
		value = nv;
		mask |= cpuhw->amasks[i][0];
	}
	if (i == n_ev)
		return 0;	/* all OK */

	/* doesn't work, gather alternatives... */
	if (!ppmu->get_alternatives)
		return -1;
	for (i = 0; i < n_ev; ++i) {
		choice[i] = 0;
		n_alt[i] = ppmu->get_alternatives(event_id[i], cflags[i],
						  cpuhw->alternatives[i]);
		for (j = 1; j < n_alt[i]; ++j)
			ppmu->get_constraint(cpuhw->alternatives[i][j],
					     &cpuhw->amasks[i][j],
					     &cpuhw->avalues[i][j]);
	}

	/* enumerate all possibilities and see if any will work */
	i = 0;
	j = -1;
	value = mask = nv = 0;
	while (i < n_ev) {
		if (j >= 0) {
			/* we're backtracking, restore context */
			value = svalues[i];
			mask = smasks[i];
			j = choice[i];
		}
		/*
		 * See if any alternative k for event_id i,
		 * where k > j, will satisfy the constraints.
		 */
		while (++j < n_alt[i]) {
			nv = (value | cpuhw->avalues[i][j]) +
				(value & cpuhw->avalues[i][j] & addf);
			if ((((nv + tadd) ^ value) & mask) == 0 &&
			    (((nv + tadd) ^ cpuhw->avalues[i][j])
			     & cpuhw->amasks[i][j]) == 0)
				break;
		}
		if (j >= n_alt[i]) {
			/*
			 * No feasible alternative, backtrack
			 * to event_id i-1 and continue enumerating its
			 * alternatives from where we got up to.
			 */
			if (--i < 0)
				return -1;
		} else {
			/*
			 * Found a feasible alternative for event_id i,
			 * remember where we got up to with this event_id,
			 * go on to the next event_id, and start with
			 * the first alternative for it.
			 */
			choice[i] = j;
			svalues[i] = value;
			smasks[i] = mask;
			value = nv;
			mask |= cpuhw->amasks[i][j];
			++i;
			j = -1;
		}
	}

	/* OK, we have a feasible combination, tell the caller the solution */
	for (i = 0; i < n_ev; ++i)
		event_id[i] = cpuhw->alternatives[i][choice[i]];
	return 0;
}
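
/*
 * Rough sketch of the constraint arithmetic above (the exact bit-field
 * layout is CPU-specific and comes from ppmu->get_constraint()): each
 * event contributes a (mask, value) pair whose fields encode resource
 * usage such as "needs PMCn" or "uses a particular event selection
 * unit".  Values are accumulated with the add_fields/test_adder trick:
 * adding test_adder makes any over-committed field carry into bits
 * covered by the accumulated mask, so "(((nv + tadd) ^ value) & mask)
 * != 0" flags an infeasible selection.  When the first-choice codes
 * don't fit, the loop backtracks across each event's alternative codes
 * until a consistent assignment is found.
 */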

/*
 * Check if newly-added events have consistent settings for
 * exclude_{user,kernel,hv} with each other and any previously
 * added events.
 */
static int check_excludes(struct perf_event **ctrs, unsigned int cflags[],
			  int n_prev, int n_new)
{
	int eu = 0, ek = 0, eh = 0;
	int i, n, first;
	struct perf_event *event;

	n = n_prev + n_new;
	if (n <= 1)
		return 0;

	first = 1;
	for (i = 0; i < n; ++i) {
		if (cflags[i] & PPMU_LIMITED_PMC_OK) {
			cflags[i] &= ~PPMU_LIMITED_PMC_REQD;
			continue;
		}
		event = ctrs[i];
		if (first) {
			eu = event->attr.exclude_user;
			ek = event->attr.exclude_kernel;
			eh = event->attr.exclude_hv;
			first = 0;
		} else if (event->attr.exclude_user != eu ||
			   event->attr.exclude_kernel != ek ||
			   event->attr.exclude_hv != eh) {
			return -EAGAIN;
		}
	}

	if (eu || ek || eh)
		for (i = 0; i < n; ++i)
			if (cflags[i] & PPMU_LIMITED_PMC_OK)
				cflags[i] |= PPMU_LIMITED_PMC_REQD;

	return 0;
}

static u64 check_and_compute_delta(u64 prev, u64 val)
{
	u64 delta = (val - prev) & 0xfffffffful;

	/*
	 * POWER7 can roll back counter values; if the new value is smaller
	 * than the previous value it will cause the delta and the counter to
	 * have bogus values unless the counter simply wrapped.  If a counter
	 * is rolled back, it will be smaller, but within 256, which is the
	 * maximum number of events to roll back at once.  If we detect a
	 * rollback, return 0.  This can lead to a small lack of precision in
	 * the counters.
	 */
	if (prev > val && (prev - val) < 256)
		delta = 0;

	return delta;
}
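
/*
 * Worked example (illustrative values): with prev = 0xfffffff0 and
 * val = 0x00000010 the counter has simply wrapped, so
 * (val - prev) & 0xffffffff = 0x20 and 32 events are credited.  With
 * prev = 0x1000 and val = 0x0f80 the counter has gone backwards by
 * 128 (< 256), which is treated as a speculative rollback and a delta
 * of 0 is returned instead of a huge bogus value.
 */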

static void power_pmu_read(struct perf_event *event)
{
	s64 val, delta, prev;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	if (!event->hw.idx)
		return;
	/*
	 * Performance monitor interrupts come even when interrupts
	 * are soft-disabled, as long as interrupts are hard-enabled.
	 * Therefore we treat them like NMIs.
	 */
	do {
		prev = local64_read(&event->hw.prev_count);
		barrier();
		val = read_pmc(event->hw.idx);
		delta = check_and_compute_delta(prev, val);
		if (!delta)
			return;
	} while (local64_cmpxchg(&event->hw.prev_count, prev, val) != prev);

	local64_add(delta, &event->count);
	local64_sub(delta, &event->hw.period_left);
}

/*
 * On some machines, PMC5 and PMC6 can't be written, don't respect
 * the freeze conditions, and don't generate interrupts.  This tells
 * us if `event' is using such a PMC.
 */
static int is_limited_pmc(int pmcnum)
{
	return (ppmu->flags & PPMU_LIMITED_PMC5_6)
		&& (pmcnum == 5 || pmcnum == 6);
}

static void freeze_limited_counters(struct cpu_hw_events *cpuhw,
				    unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev, delta;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		if (!event->hw.idx)
			continue;
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		event->hw.idx = 0;
		delta = check_and_compute_delta(prev, val);
		if (delta)
			local64_add(delta, &event->count);
	}
}

static void thaw_limited_counters(struct cpu_hw_events *cpuhw,
				  unsigned long pmc5, unsigned long pmc6)
{
	struct perf_event *event;
	u64 val, prev;
	int i;

	for (i = 0; i < cpuhw->n_limited; ++i) {
		event = cpuhw->limited_counter[i];
		event->hw.idx = cpuhw->limited_hwidx[i];
		val = (event->hw.idx == 5) ? pmc5 : pmc6;
		prev = local64_read(&event->hw.prev_count);
		if (check_and_compute_delta(prev, val))
			local64_set(&event->hw.prev_count, val);
		perf_event_update_userpage(event);
	}
}

/*
 * Since limited events don't respect the freeze conditions, we
 * have to read them immediately after freezing or unfreezing the
 * other events.  We try to keep the values from the limited
 * events as consistent as possible by keeping the delay (in
 * cycles and instructions) between freezing/unfreezing and reading
 * the limited events as small and consistent as possible.
 * Therefore, if any limited events are in use, we read them
 * both, and always in the same order, to minimize variability,
 * and do it inside the same asm that writes MMCR0.
 */
static void write_mmcr0(struct cpu_hw_events *cpuhw, unsigned long mmcr0)
{
	unsigned long pmc5, pmc6;

	if (!cpuhw->n_limited) {
		mtspr(SPRN_MMCR0, mmcr0);
		return;
	}

	/*
	 * Write MMCR0, then read PMC5 and PMC6 immediately.
	 * To ensure we don't get a performance monitor interrupt
	 * between writing MMCR0 and freezing/thawing the limited
	 * events, we first write MMCR0 with the event overflow
	 * interrupt enable bits turned off.
	 */
	asm volatile("mtspr %3,%2; mfspr %0,%4; mfspr %1,%5"
		     : "=&r" (pmc5), "=&r" (pmc6)
		     : "r" (mmcr0 & ~(MMCR0_PMC1CE | MMCR0_PMCjCE)),
		       "i" (SPRN_MMCR0),
		       "i" (SPRN_PMC5), "i" (SPRN_PMC6));

	if (mmcr0 & MMCR0_FC)
		freeze_limited_counters(cpuhw, pmc5, pmc6);
	else
		thaw_limited_counters(cpuhw, pmc5, pmc6);

	/*
	 * Write the full MMCR0 including the event overflow interrupt
	 * enable bits, if necessary.
	 */
	if (mmcr0 & (MMCR0_PMC1CE | MMCR0_PMCjCE))
		mtspr(SPRN_MMCR0, mmcr0);
}

/*
 * Disable all events to prevent PMU interrupts and to allow
 * events to be added or removed.
 */
static void power_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);

	if (!cpuhw->disabled) {
		cpuhw->disabled = 1;
		cpuhw->n_added = 0;

		/*
		 * Check if we ever enabled the PMU on this cpu.
		 */
		if (!cpuhw->pmcs_enabled) {
			ppc_enable_pmcs();
			cpuhw->pmcs_enabled = 1;
		}

		/*
		 * Disable instruction sampling if it was enabled
		 */
		if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
			mtspr(SPRN_MMCRA,
			      cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
			mb();
		}

		/*
		 * Set the 'freeze counters' bit.
		 * The barrier is to make sure the mtspr has been
		 * executed and the PMU has frozen the events
		 * before we return.
		 */
		write_mmcr0(cpuhw, mfspr(SPRN_MMCR0) | MMCR0_FC);
		mb();
	}
	local_irq_restore(flags);
}

/*
 * Re-enable all events if disable == 0.
 * If we were previously disabled and events were added, then
 * put the new config on the PMU.
 */
static void power_pmu_enable(struct pmu *pmu)
{
	struct perf_event *event;
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	long i;
	unsigned long val;
	s64 left;
	unsigned int hwc_index[MAX_HWEVENTS];
	int n_lim;
	int idx;

	if (!ppmu)
		return;
	local_irq_save(flags);
	cpuhw = &__get_cpu_var(cpu_hw_events);
	if (!cpuhw->disabled) {
		local_irq_restore(flags);
		return;
	}
	cpuhw->disabled = 0;

	/*
	 * If we didn't change anything, or only removed events,
	 * no need to recalculate MMCR* settings and reset the PMCs.
	 * Just reenable the PMU with the current MMCR* settings
	 * (possibly updated for removal of events).
	 */
	if (!cpuhw->n_added) {
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
		mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
		if (cpuhw->n_events == 0)
			ppc_set_pmu_inuse(0);
		goto out_enable;
	}

	/*
	 * Compute MMCR* values for the new set of events
	 */
	if (ppmu->compute_mmcr(cpuhw->events, cpuhw->n_events, hwc_index,
			       cpuhw->mmcr)) {
		/* shouldn't ever get here */
		printk(KERN_ERR "oops compute_mmcr failed\n");
		goto out;
	}

	/*
	 * Add in MMCR0 freeze bits corresponding to the
	 * attr.exclude_* bits for the first event.
	 * We have already checked that all events have the
	 * same values for these bits as the first event.
	 */
	event = cpuhw->event[0];
	if (event->attr.exclude_user)
		cpuhw->mmcr[0] |= MMCR0_FCP;
	if (event->attr.exclude_kernel)
		cpuhw->mmcr[0] |= freeze_events_kernel;
	if (event->attr.exclude_hv)
		cpuhw->mmcr[0] |= MMCR0_FCHV;

	/*
	 * Write the new configuration to MMCR* with the freeze
	 * bit set and set the hardware events to their initial values.
	 * Then unfreeze the events.
	 */
	ppc_set_pmu_inuse(1);
	mtspr(SPRN_MMCRA, cpuhw->mmcr[2] & ~MMCRA_SAMPLE_ENABLE);
	mtspr(SPRN_MMCR1, cpuhw->mmcr[1]);
	mtspr(SPRN_MMCR0, (cpuhw->mmcr[0] & ~(MMCR0_PMC1CE | MMCR0_PMCjCE))
				| MMCR0_FC);

	/*
	 * Read off any pre-existing events that need to move
	 * to another PMC.
	 */
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx && event->hw.idx != hwc_index[i] + 1) {
			power_pmu_read(event);
			write_pmc(event->hw.idx, 0);
			event->hw.idx = 0;
		}
	}

	/*
	 * Initialize the PMCs for all the new and moved events.
	 */
	cpuhw->n_limited = n_lim = 0;
	for (i = 0; i < cpuhw->n_events; ++i) {
		event = cpuhw->event[i];
		if (event->hw.idx)
			continue;
		idx = hwc_index[i] + 1;
		if (is_limited_pmc(idx)) {
			cpuhw->limited_counter[n_lim] = event;
			cpuhw->limited_hwidx[n_lim] = idx;
			++n_lim;
			continue;
		}
		val = 0;
		if (event->hw.sample_period) {
			left = local64_read(&event->hw.period_left);
			if (left < 0x80000000L)
				val = 0x80000000L - left;
		}
		local64_set(&event->hw.prev_count, val);
		event->hw.idx = idx;
		if (event->hw.state & PERF_HES_STOPPED)
			val = 0;
		write_pmc(idx, val);
		perf_event_update_userpage(event);
	}
	cpuhw->n_limited = n_lim;
	cpuhw->mmcr[0] |= MMCR0_PMXE | MMCR0_FCECE;

 out_enable:
	mb();
	write_mmcr0(cpuhw, cpuhw->mmcr[0]);

	/*
	 * Enable instruction sampling if necessary
	 */
	if (cpuhw->mmcr[2] & MMCRA_SAMPLE_ENABLE) {
		mb();
		mtspr(SPRN_MMCRA, cpuhw->mmcr[2]);
	}

 out:
	if (cpuhw->bhrb_users)
		ppmu->config_bhrb(cpuhw->bhrb_filter);

	local_irq_restore(flags);
}

static int collect_events(struct perf_event *group, int max_count,
			  struct perf_event *ctrs[], u64 *events,
			  unsigned int *flags)
{
	int n = 0;
	struct perf_event *event;

	if (!is_software_event(group)) {
		if (n >= max_count)
			return -1;
		ctrs[n] = group;
		flags[n] = group->hw.event_base;
		events[n++] = group->hw.config;
	}
	list_for_each_entry(event, &group->sibling_list, group_entry) {
		if (!is_software_event(event) &&
		    event->state != PERF_EVENT_STATE_OFF) {
			if (n >= max_count)
				return -1;
			ctrs[n] = event;
			flags[n] = event->hw.event_base;
			events[n++] = event->hw.config;
		}
	}
	return n;
}

/*
 * Add an event to the PMU.
 * If all events are not already frozen, then we disable and
 * re-enable the PMU in order to get hw_perf_enable to do the
 * actual work of reconfiguring the PMU.
 */
static int power_pmu_add(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	unsigned long flags;
	int n0;
	int ret = -EAGAIN;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	/*
	 * Add the event to the list (if there is room)
	 * and check whether the total set is still feasible.
	 */
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n0 = cpuhw->n_events;
	if (n0 >= ppmu->n_counter)
		goto out;
	cpuhw->event[n0] = event;
	cpuhw->events[n0] = event->hw.config;
	cpuhw->flags[n0] = event->hw.event_base;

	/*
	 * This event may have been disabled/stopped in record_and_restart()
	 * because we exceeded the ->event_limit. If re-starting the event,
	 * clear the ->hw.state (STOPPED and UPTODATE flags), so the user
	 * notification is re-enabled.
	 */
	if (!(ef_flags & PERF_EF_START))
		event->hw.state = PERF_HES_STOPPED | PERF_HES_UPTODATE;
	else
		event->hw.state = 0;

	/*
	 * If a group events scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole.
	 */
	if (cpuhw->group_flag & PERF_EVENT_TXN)
		goto nocheck;

	if (check_excludes(cpuhw->event, cpuhw->flags, n0, 1))
		goto out;
	if (power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n0 + 1))
		goto out;
	event->hw.config = cpuhw->events[n0];

nocheck:
	++cpuhw->n_events;
	++cpuhw->n_added;

	ret = 0;
 out:
	if (has_branch_stack(event))
		power_pmu_bhrb_enable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
	return ret;
}

/*
 * Remove an event from the PMU.
 */
static void power_pmu_del(struct perf_event *event, int ef_flags)
{
	struct cpu_hw_events *cpuhw;
	long i;
	unsigned long flags;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);

	cpuhw = &__get_cpu_var(cpu_hw_events);
	for (i = 0; i < cpuhw->n_events; ++i) {
		if (event == cpuhw->event[i]) {
			while (++i < cpuhw->n_events) {
				cpuhw->event[i-1] = cpuhw->event[i];
				cpuhw->events[i-1] = cpuhw->events[i];
				cpuhw->flags[i-1] = cpuhw->flags[i];
			}
			--cpuhw->n_events;
			ppmu->disable_pmc(event->hw.idx - 1, cpuhw->mmcr);
			if (event->hw.idx) {
				write_pmc(event->hw.idx, 0);
				event->hw.idx = 0;
			}
			perf_event_update_userpage(event);
			break;
		}
	}
	for (i = 0; i < cpuhw->n_limited; ++i)
		if (event == cpuhw->limited_counter[i])
			break;
	if (i < cpuhw->n_limited) {
		while (++i < cpuhw->n_limited) {
			cpuhw->limited_counter[i-1] = cpuhw->limited_counter[i];
			cpuhw->limited_hwidx[i-1] = cpuhw->limited_hwidx[i];
		}
		--cpuhw->n_limited;
	}
	if (cpuhw->n_events == 0) {
		/* disable exceptions if no events are running */
		cpuhw->mmcr[0] &= ~(MMCR0_PMXE | MMCR0_FCECE);
	}

	if (has_branch_stack(event))
		power_pmu_bhrb_disable(event);

	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

/*
 * POWER-PMU does not support disabling individual counters, hence
 * program their cycle counter to their max value and ignore the interrupts.
 */

static void power_pmu_start(struct perf_event *event, int ef_flags)
{
	unsigned long flags;
	s64 left;
	unsigned long val;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (!(event->hw.state & PERF_HES_STOPPED))
		return;

	if (ef_flags & PERF_EF_RELOAD)
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	event->hw.state = 0;
	left = local64_read(&event->hw.period_left);

	val = 0;
	if (left < 0x80000000L)
		val = 0x80000000L - left;

	write_pmc(event->hw.idx, val);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}
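
/*
 * Note on the reload value above: a PMC raises its overflow exception
 * when bit 31 of the counter becomes set, so starting the counter at
 * 0x80000000 - left makes it overflow after roughly 'left' more
 * events.  This is how the remaining sample period is enforced; the
 * same trick is used in power_pmu_enable() and record_and_restart().
 */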

static void power_pmu_stop(struct perf_event *event, int ef_flags)
{
	unsigned long flags;

	if (!event->hw.idx || !event->hw.sample_period)
		return;

	if (event->hw.state & PERF_HES_STOPPED)
		return;

	local_irq_save(flags);
	perf_pmu_disable(event->pmu);

	power_pmu_read(event);
	event->hw.state |= PERF_HES_STOPPED | PERF_HES_UPTODATE;
	write_pmc(event->hw.idx, 0);

	perf_event_update_userpage(event);
	perf_pmu_enable(event->pmu);
	local_irq_restore(flags);
}

/*
 * Start group events scheduling transaction
 * Set the flag to make pmu::enable() not perform the
 * schedulability test, it will be performed at commit time
 */
void power_pmu_start_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	perf_pmu_disable(pmu);
	cpuhw->group_flag |= PERF_EVENT_TXN;
	cpuhw->n_txn_start = cpuhw->n_events;
}

/*
 * Stop group events scheduling transaction
 * Clear the flag and pmu::enable() will perform the
 * schedulability test.
 */
void power_pmu_cancel_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
}

/*
 * Commit group events scheduling transaction
 * Perform the group schedulability test as a whole
 * Return 0 if success
 */
int power_pmu_commit_txn(struct pmu *pmu)
{
	struct cpu_hw_events *cpuhw;
	long i, n;

	if (!ppmu)
		return -EAGAIN;
	cpuhw = &__get_cpu_var(cpu_hw_events);
	n = cpuhw->n_events;
	if (check_excludes(cpuhw->event, cpuhw->flags, 0, n))
		return -EAGAIN;
	i = power_check_constraints(cpuhw, cpuhw->events, cpuhw->flags, n);
	if (i < 0)
		return -EAGAIN;

	for (i = cpuhw->n_txn_start; i < n; ++i)
		cpuhw->event[i]->hw.config = cpuhw->events[i];

	cpuhw->group_flag &= ~PERF_EVENT_TXN;
	perf_pmu_enable(pmu);
	return 0;
}

/*
 * Return 1 if we might be able to put the event on a limited PMC,
 * or 0 if not.
 * An event can only go on a limited PMC if it counts something
 * that a limited PMC can count, doesn't require interrupts, and
 * doesn't exclude any processor mode.
 */
static int can_go_on_limited_pmc(struct perf_event *event, u64 ev,
				 unsigned int flags)
{
	int n;
	u64 alt[MAX_EVENT_ALTERNATIVES];

	if (event->attr.exclude_user
	    || event->attr.exclude_kernel
	    || event->attr.exclude_hv
	    || event->attr.sample_period)
		return 0;

	if (ppmu->limited_pmc_event(ev))
		return 1;

	/*
	 * The requested event_id isn't on a limited PMC already;
	 * see if any alternative code goes on a limited PMC.
	 */
	if (!ppmu->get_alternatives)
		return 0;

	flags |= PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD;
	n = ppmu->get_alternatives(ev, flags, alt);

	return n > 0;
}

/*
 * Find an alternative event_id that goes on a normal PMC, if possible,
 * and return the event_id code, or 0 if there is no such alternative.
 * (Note: event_id code 0 is "don't count" on all machines.)
 */
static u64 normal_pmc_alternative(u64 ev, unsigned long flags)
{
	u64 alt[MAX_EVENT_ALTERNATIVES];
	int n;

	flags &= ~(PPMU_LIMITED_PMC_OK | PPMU_LIMITED_PMC_REQD);
	n = ppmu->get_alternatives(ev, flags, alt);
	if (!n)
		return 0;
	return alt[0];
}

/* Number of perf_events counting hardware events */
static atomic_t num_events;
/* Used to avoid races in calling reserve/release_pmc_hardware */
static DEFINE_MUTEX(pmc_reserve_mutex);

/*
 * Release the PMU if this is the last perf_event.
 */
static void hw_perf_event_destroy(struct perf_event *event)
{
	if (!atomic_add_unless(&num_events, -1, 1)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_dec_return(&num_events) == 0)
			release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Translate a generic cache event_id config to a raw event_id code.
 */
static int hw_perf_cache_event(u64 config, u64 *eventp)
{
	unsigned long type, op, result;
	int ev;

	if (!ppmu->cache_events)
		return -EINVAL;

	/* unpack config */
	type = config & 0xff;
	op = (config >> 8) & 0xff;
	result = (config >> 16) & 0xff;

	if (type >= PERF_COUNT_HW_CACHE_MAX ||
	    op >= PERF_COUNT_HW_CACHE_OP_MAX ||
	    result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	ev = (*ppmu->cache_events)[type][op][result];
	if (ev == 0)
		return -EOPNOTSUPP;
	if (ev == -1)
		return -EINVAL;
	*eventp = ev;
	return 0;
}
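
/*
 * Example of the packing decoded above: the generic cache event config
 * is laid out as (result << 16) | (op << 8) | type, so an L1-dcache
 * read miss (type 0, op 0, result 1) arrives as config 0x10000 and is
 * mapped through (*ppmu->cache_events)[0][0][1] to the CPU-specific
 * raw event code.
 */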
1400
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001401static int power_pmu_event_init(struct perf_event *event)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001402{
1403 u64 ev;
1404 unsigned long flags;
1405 struct perf_event *ctrs[MAX_HWEVENTS];
1406 u64 events[MAX_HWEVENTS];
1407 unsigned int cflags[MAX_HWEVENTS];
1408 int n;
1409 int err;
1410 struct cpu_hw_events *cpuhw;
1411
1412 if (!ppmu)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001413 return -ENOENT;
1414
Anshuman Khandual3925f462013-04-22 19:42:44 +00001415 if (has_branch_stack(event)) {
1416 /* PMU has BHRB enabled */
1417 if (!(ppmu->flags & PPMU_BHRB))
1418 return -EOPNOTSUPP;
1419 }
Stephane Eranian2481c5f2012-02-09 23:20:59 +01001420
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001421 switch (event->attr.type) {
1422 case PERF_TYPE_HARDWARE:
1423 ev = event->attr.config;
1424 if (ev >= ppmu->n_generic || ppmu->generic_events[ev] == 0)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001425 return -EOPNOTSUPP;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001426 ev = ppmu->generic_events[ev];
1427 break;
1428 case PERF_TYPE_HW_CACHE:
1429 err = hw_perf_cache_event(event->attr.config, &ev);
1430 if (err)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001431 return err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001432 break;
1433 case PERF_TYPE_RAW:
1434 ev = event->attr.config;
1435 break;
1436 default:
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001437 return -ENOENT;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001438 }
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001439
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001440 event->hw.config_base = ev;
1441 event->hw.idx = 0;
1442
1443 /*
1444 * If we are not running on a hypervisor, force the
1445 * exclude_hv bit to 0 so that we don't care what
1446 * the user set it to.
1447 */
1448 if (!firmware_has_feature(FW_FEATURE_LPAR))
1449 event->attr.exclude_hv = 0;
1450
1451 /*
1452 * If this is a per-task event, then we can use
1453 * PM_RUN_* events interchangeably with their non RUN_*
1454 * equivalents, e.g. PM_RUN_CYC instead of PM_CYC.
1455 * XXX we should check if the task is an idle task.
1456 */
1457 flags = 0;
Paul Mackerras57fa7212010-10-19 16:55:35 +11001458 if (event->attach_state & PERF_ATTACH_TASK)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001459 flags |= PPMU_ONLY_COUNT_RUN;
1460
1461 /*
1462 * If this machine has limited events, check whether this
1463 * event_id could go on a limited event.
1464 */
1465 if (ppmu->flags & PPMU_LIMITED_PMC5_6) {
1466 if (can_go_on_limited_pmc(event, ev, flags)) {
1467 flags |= PPMU_LIMITED_PMC_OK;
1468 } else if (ppmu->limited_pmc_event(ev)) {
1469 /*
1470 * The requested event_id is on a limited PMC,
1471 * but we can't use a limited PMC; see if any
1472 * alternative goes on a normal PMC.
1473 */
1474 ev = normal_pmc_alternative(ev, flags);
1475 if (!ev)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001476 return -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001477 }
1478 }
1479
1480 /*
1481 * If this is in a group, check if it can go on with all the
1482 * other hardware events in the group. We assume the event
1483 * hasn't been linked into its leader's sibling list at this point.
1484 */
1485 n = 0;
1486 if (event->group_leader != event) {
Paul Mackerrasa8f90e92009-09-22 09:48:08 +10001487 n = collect_events(event->group_leader, ppmu->n_counter - 1,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001488 ctrs, events, cflags);
1489 if (n < 0)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001490 return -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001491 }
1492 events[n] = ev;
1493 ctrs[n] = event;
1494 cflags[n] = flags;
1495 if (check_excludes(ctrs, cflags, n, 1))
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001496 return -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001497
1498 cpuhw = &get_cpu_var(cpu_hw_events);
1499 err = power_check_constraints(cpuhw, events, cflags, n + 1);
Anshuman Khandual3925f462013-04-22 19:42:44 +00001500
1501 if (has_branch_stack(event)) {
1502 cpuhw->bhrb_filter = ppmu->bhrb_filter_map(
1503 event->attr.branch_sample_type);
1504
1505 if(cpuhw->bhrb_filter == -1)
1506 return -EOPNOTSUPP;
1507 }
1508
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001509 put_cpu_var(cpu_hw_events);
1510 if (err)
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001511 return -EINVAL;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001512
1513 event->hw.config = events[n];
1514 event->hw.event_base = cflags[n];
1515 event->hw.last_period = event->hw.sample_period;
Peter Zijlstrae7850592010-05-21 14:43:08 +02001516 local64_set(&event->hw.period_left, event->hw.last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001517
1518 /*
1519 * See if we need to reserve the PMU.
1520 * If no events are currently in use, then we have to take a
1521 * mutex to ensure that we don't race with another task doing
1522 * reserve_pmc_hardware or release_pmc_hardware.
1523 */
1524 err = 0;
1525 if (!atomic_inc_not_zero(&num_events)) {
1526 mutex_lock(&pmc_reserve_mutex);
1527 if (atomic_read(&num_events) == 0 &&
1528 reserve_pmc_hardware(perf_event_interrupt))
1529 err = -EBUSY;
1530 else
1531 atomic_inc(&num_events);
1532 mutex_unlock(&pmc_reserve_mutex);
1533 }
1534 event->destroy = hw_perf_event_destroy;
1535
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001536 return err;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001537}
1538
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01001539static int power_pmu_event_idx(struct perf_event *event)
1540{
1541 return event->hw.idx;
1542}
1543
Sukadev Bhattiprolu1c53a272013-01-22 22:24:54 -08001544ssize_t power_events_sysfs_show(struct device *dev,
1545 struct device_attribute *attr, char *page)
1546{
1547 struct perf_pmu_events_attr *pmu_attr;
1548
1549 pmu_attr = container_of(attr, struct perf_pmu_events_attr, attr);
1550
1551 return sprintf(page, "event=0x%02llx\n", pmu_attr->id);
1552}
1553
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001554struct pmu power_pmu = {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001555 .pmu_enable = power_pmu_enable,
1556 .pmu_disable = power_pmu_disable,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001557 .event_init = power_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001558 .add = power_pmu_add,
1559 .del = power_pmu_del,
1560 .start = power_pmu_start,
1561 .stop = power_pmu_stop,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001562 .read = power_pmu_read,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001563 .start_txn = power_pmu_start_txn,
1564 .cancel_txn = power_pmu_cancel_txn,
1565 .commit_txn = power_pmu_commit_txn,
Peter Zijlstra35edc2a2011-11-20 20:36:02 +01001566 .event_idx = power_pmu_event_idx,
Anshuman Khandual3925f462013-04-22 19:42:44 +00001567 .flush_branch_stack = power_pmu_flush_branch_stack,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001568};
1569
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001570/*
Ingo Molnar57c0c152009-09-21 12:20:38 +02001571 * A counter has overflowed; update its count and record
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001572 * things if requested. Note that interrupts are hard-disabled
1573 * here so there is no possibility of being interrupted.
1574 */
1575static void record_and_restart(struct perf_event *event, unsigned long val,
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001576 struct pt_regs *regs)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001577{
1578 u64 period = event->hw.sample_period;
1579 s64 prev, delta, left;
1580 int record = 0;
1581
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001582 if (event->hw.state & PERF_HES_STOPPED) {
1583 write_pmc(event->hw.idx, 0);
1584 return;
1585 }
1586
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001587 /* we don't have to worry about interrupts here */
Peter Zijlstrae7850592010-05-21 14:43:08 +02001588 prev = local64_read(&event->hw.prev_count);
Eric B Munson86c74ab2011-04-15 08:12:30 +00001589 delta = check_and_compute_delta(prev, val);
Peter Zijlstrae7850592010-05-21 14:43:08 +02001590 local64_add(delta, &event->count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001591
1592 /*
1593 * See if the total period for this event has expired,
1594 * and update for the next period.
1595 */
1596 val = 0;
Peter Zijlstrae7850592010-05-21 14:43:08 +02001597 left = local64_read(&event->hw.period_left) - delta;
Michael Neulinge13e8952012-11-05 15:08:38 +00001598 if (delta == 0)
1599 left++;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001600 if (period) {
1601 if (left <= 0) {
1602 left += period;
1603 if (left <= 0)
1604 left = period;
sukadev@linux.vnet.ibm.come6878832012-09-18 20:56:11 +00001605 record = siar_valid(regs);
Anton Blanchard4bca7702011-01-17 16:17:42 +11001606 event->hw.last_period = event->hw.sample_period;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001607 }
1608 if (left < 0x80000000LL)
1609 val = 0x80000000LL - left;
1610 }
1611
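	/*
	 * Reload the PMC.  The counters are 32-bit and raise an interrupt
	 * when they go negative (bit 31 set), so starting the counter at
	 * 0x80000000 - left makes it overflow again after "left" more
	 * events.  With no sampling period (or if "left" is too large to
	 * fit) the counter is simply restarted from zero.
	 */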
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001612 write_pmc(event->hw.idx, val);
1613 local64_set(&event->hw.prev_count, val);
1614 local64_set(&event->hw.period_left, left);
1615 perf_event_update_userpage(event);
1616
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001617 /*
1618 * Finally record data if requested.
1619 */
1620 if (record) {
Peter Zijlstradc1d6282010-03-03 15:55:04 +01001621 struct perf_sample_data data;
1622
Robert Richterfd0d0002012-04-02 20:19:08 +02001623 perf_sample_data_init(&data, ~0ULL, event->hw.last_period);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001624
1625 if (event->attr.sample_type & PERF_SAMPLE_ADDR)
1626 perf_get_data_addr(regs, &data.addr);
1627
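		/*
		 * If the event asked for a branch stack, read the BHRB
		 * entries into the per-cpu buffer and attach them to the
		 * sample.
		 */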
Anshuman Khandual3925f462013-04-22 19:42:44 +00001628 if (event->attr.sample_type & PERF_SAMPLE_BRANCH_STACK) {
1629 struct cpu_hw_events *cpuhw;
1630 cpuhw = &__get_cpu_var(cpu_hw_events);
1631 power_pmu_bhrb_read(cpuhw);
1632 data.br_stack = &cpuhw->bhrb_stack;
1633 }
1634
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001635 if (perf_event_overflow(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001636 power_pmu_stop(event, 0);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001637 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001638}
1639
1640/*
1641 * Called from generic code to get the misc flags (i.e. processor mode)
1642 * for an event_id.
1643 */
1644unsigned long perf_misc_flags(struct pt_regs *regs)
1645{
1646 u32 flags = perf_get_misc_flags(regs);
1647
1648 if (flags)
1649 return flags;
1650 return user_mode(regs) ? PERF_RECORD_MISC_USER :
1651 PERF_RECORD_MISC_KERNEL;
1652}
1653
1654/*
1655 * Called from generic code to get the instruction pointer
1656 * for an event_id.
1657 */
1658unsigned long perf_instruction_pointer(struct pt_regs *regs)
1659{
Michael Ellerman33904052013-04-25 19:28:25 +00001660 bool use_siar = regs_use_siar(regs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001661
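	/*
	 * When the sampled-address registers are in use, only trust SIAR
	 * if the hardware marked it valid; otherwise report no instruction
	 * pointer rather than a stale one.  If SIAR is not in use, fall
	 * back to the interrupted NIP.
	 */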
sukadev@linux.vnet.ibm.come6878832012-09-18 20:56:11 +00001662 if (use_siar && siar_valid(regs))
Anton Blanchard75382aa2012-06-26 01:01:36 +00001663 return mfspr(SPRN_SIAR) + perf_ip_adjust(regs);
sukadev@linux.vnet.ibm.come6878832012-09-18 20:56:11 +00001664 else if (use_siar)
 1665 return 0; /* no valid instruction pointer */
Anton Blanchard75382aa2012-06-26 01:01:36 +00001666 else
Benjamin Herrenschmidt1ce447b2012-03-26 20:47:34 +00001667 return regs->nip;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001668}
1669
Michael Neulingbc09c212012-11-05 15:53:54 +00001670static bool pmc_overflow_power7(unsigned long val)
Anton Blanchard0837e322011-03-09 14:38:42 +11001671{
Anton Blanchard0837e322011-03-09 14:38:42 +11001672 /*
1673 * Events on POWER7 can roll back if a speculative event doesn't
1674 * eventually complete. Unfortunately in some rare cases they will
1675 * raise a performance monitor exception. We need to catch this to
1676 * ensure we reset the PMC. In all cases the PMC will be 256 or less
1677 * cycles from overflow.
1678 *
1679 * We only do this if the first pass fails to find any overflowing
1680 * PMCs because a user might set a period of less than 256 and we
1681 * don't want to mistakenly reset them.
1682 */
Michael Neulingbc09c212012-11-05 15:53:54 +00001683 if ((0x80000000 - val) <= 256)
1684 return true;
1685
1686 return false;
1687}
1688
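/*
 * A PMC requests an interrupt when it goes negative (bit 31 set), so a
 * value with the sign bit set means the counter has overflowed.
 */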
1689static bool pmc_overflow(unsigned long val)
1690{
1691 if ((int)val < 0)
Anton Blanchard0837e322011-03-09 14:38:42 +11001692 return true;
1693
1694 return false;
1695}
1696
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001697/*
 1698 * Performance monitor interrupt handler.
1699 */
1700static void perf_event_interrupt(struct pt_regs *regs)
1701{
Michael Neulingbc09c212012-11-05 15:53:54 +00001702 int i, j;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001703 struct cpu_hw_events *cpuhw = &__get_cpu_var(cpu_hw_events);
1704 struct perf_event *event;
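	/* one slot per PMC; assumes ppmu->n_counter never exceeds 8 */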
Michael Neulingbc09c212012-11-05 15:53:54 +00001705 unsigned long val[8];
1706 int found, active;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001707 int nmi;
1708
1709 if (cpuhw->n_limited)
Paul Mackerrasa8f90e92009-09-22 09:48:08 +10001710 freeze_limited_counters(cpuhw, mfspr(SPRN_PMC5),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001711 mfspr(SPRN_PMC6));
1712
1713 perf_read_regs(regs);
1714
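	/*
	 * perf_intr_is_nmi() reports whether this interrupt should be
	 * treated as an NMI (typically one that arrived while interrupts
	 * were soft-disabled); if so, skip the normal IRQ entry accounting.
	 */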
1715 nmi = perf_intr_is_nmi(regs);
1716 if (nmi)
1717 nmi_enter();
1718 else
1719 irq_enter();
1720
Michael Neulingbc09c212012-11-05 15:53:54 +00001721 /* Read all the PMCs since we'll need them a bunch of times */
1722 for (i = 0; i < ppmu->n_counter; ++i)
1723 val[i] = read_pmc(i + 1);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001724
Michael Neulingbc09c212012-11-05 15:53:54 +00001725 /* Try to find what caused the IRQ */
1726 found = 0;
1727 for (i = 0; i < ppmu->n_counter; ++i) {
1728 if (!pmc_overflow(val[i]))
1729 continue;
1730 if (is_limited_pmc(i + 1))
1731 continue; /* these won't generate IRQs */
1732 /*
1733 * We've found one that's overflowed. For active
1734 * counters we need to log this. For inactive
 1735 * counters, we need to reset them anyway.
1736 */
1737 found = 1;
1738 active = 0;
1739 for (j = 0; j < cpuhw->n_events; ++j) {
1740 event = cpuhw->event[j];
1741 if (event->hw.idx == (i + 1)) {
1742 active = 1;
1743 record_and_restart(event, val[i], regs);
1744 break;
1745 }
1746 }
1747 if (!active)
1748 /* reset non active counters that have overflowed */
1749 write_pmc(i + 1, 0);
1750 }
1751 if (!found && pvr_version_is(PVR_POWER7)) {
1752 /* check active counters for special buggy p7 overflow */
1753 for (i = 0; i < cpuhw->n_events; ++i) {
1754 event = cpuhw->event[i];
1755 if (!event->hw.idx || is_limited_pmc(event->hw.idx))
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001756 continue;
Michael Neulingbc09c212012-11-05 15:53:54 +00001757 if (pmc_overflow_power7(val[event->hw.idx - 1])) {
 1758 /* event has overflowed in a buggy way */
1759 found = 1;
1760 record_and_restart(event,
1761 val[event->hw.idx - 1],
1762 regs);
1763 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001764 }
1765 }
Michael Neulingbc09c212012-11-05 15:53:54 +00001766 if (!found && printk_ratelimit())
1767 printk(KERN_WARNING "Can't find PMC that caused IRQ\n");
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001768
1769 /*
1770 * Reset MMCR0 to its normal value. This will set PMXE and
Ingo Molnar57c0c152009-09-21 12:20:38 +02001771 * clear FC (freeze counters) and PMAO (perf mon alert occurred)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001772 * and thus allow interrupts to occur again.
1773 * XXX might want to use MSR.PM to keep the events frozen until
1774 * we get back out of this interrupt.
1775 */
1776 write_mmcr0(cpuhw, cpuhw->mmcr[0]);
1777
1778 if (nmi)
1779 nmi_exit();
1780 else
1781 irq_exit();
1782}
1783
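/*
 * Initialise the per-cpu PMU state for a CPU that is being brought up:
 * clear it and leave the counters frozen (MMCR0_FC) until events are
 * scheduled onto this CPU.
 */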
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001784static void power_pmu_setup(int cpu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001785{
1786 struct cpu_hw_events *cpuhw = &per_cpu(cpu_hw_events, cpu);
1787
1788 if (!ppmu)
1789 return;
1790 memset(cpuhw, 0, sizeof(*cpuhw));
1791 cpuhw->mmcr[0] = MMCR0_FC;
1792}
1793
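/*
 * CPU hotplug notifier: reset the per-cpu PMU state before a CPU comes
 * online.
 */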
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001794static int __cpuinit
Peter Zijlstra85cfabb2010-03-11 13:06:56 +01001795power_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001796{
1797 unsigned int cpu = (long)hcpu;
1798
1799 switch (action & ~CPU_TASKS_FROZEN) {
1800 case CPU_UP_PREPARE:
1801 power_pmu_setup(cpu);
1802 break;
1803
1804 default:
1805 break;
1806 }
1807
1808 return NOTIFY_OK;
1809}
1810
Dmitry Eremin-Solenikov77c23422011-06-29 04:54:00 +00001811int __cpuinit register_power_pmu(struct power_pmu *pmu)
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001812{
1813 if (ppmu)
1814 return -EBUSY; /* something's already registered */
1815
1816 ppmu = pmu;
1817 pr_info("%s performance monitor hardware support registered\n",
1818 pmu->name);
1819
Sukadev Bhattiprolu1c53a272013-01-22 22:24:54 -08001820 power_pmu.attr_groups = ppmu->attr_groups;
1821
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001822#ifdef MSR_HV
1823 /*
1824 * Use FCHV to ignore kernel events if MSR.HV is set.
1825 */
1826 if (mfmsr() & MSR_HV)
1827 freeze_events_kernel = MMCR0_FCHV;
 1828#endif /* MSR_HV */
1829
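	/*
	 * Register as the "cpu" PMU handling raw hardware events, and hook
	 * CPU hotplug so newly onlined CPUs get their per-cpu state
	 * initialised.
	 */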
Peter Zijlstra2e80a822010-11-17 23:17:36 +01001830 perf_pmu_register(&power_pmu, "cpu", PERF_TYPE_RAW);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001831 perf_cpu_notifier(power_pmu_notifier);
1832
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001833 return 0;
1834}