Ingo Molnar241771e2008-12-03 10:39:53 +01001/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002 * Performance events x86 architecture code
Ingo Molnar241771e2008-12-03 10:39:53 +01003 *
Ingo Molnar98144512009-04-29 14:52:50 +02004 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
5 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
6 * Copyright (C) 2009 Jaswinder Singh Rajput
7 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
8 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
Markus Metzger30dd5682009-07-21 15:56:48 +02009 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
Stephane Eranian1da53e02010-01-18 10:58:01 +020010 * Copyright (C) 2009 Google, Inc., Stephane Eranian
Ingo Molnar241771e2008-12-03 10:39:53 +010011 *
12 * For licencing details see kernel-base/COPYING
13 */
14
Ingo Molnarcdd6c482009-09-21 12:02:48 +020015#include <linux/perf_event.h>
Ingo Molnar241771e2008-12-03 10:39:53 +010016#include <linux/capability.h>
17#include <linux/notifier.h>
18#include <linux/hardirq.h>
19#include <linux/kprobes.h>
Thomas Gleixner4ac13292008-12-09 21:43:39 +010020#include <linux/module.h>
Ingo Molnar241771e2008-12-03 10:39:53 +010021#include <linux/kdebug.h>
22#include <linux/sched.h>
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +020023#include <linux/uaccess.h>
Peter Zijlstra74193ef2009-06-15 13:07:24 +020024#include <linux/highmem.h>
Markus Metzger30dd5682009-07-21 15:56:48 +020025#include <linux/cpu.h>
Peter Zijlstra272d30b2010-01-22 16:32:17 +010026#include <linux/bitops.h>
Ingo Molnar241771e2008-12-03 10:39:53 +010027
Ingo Molnar241771e2008-12-03 10:39:53 +010028#include <asm/apic.h>
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +020029#include <asm/stacktrace.h>
Peter Zijlstra4e935e42009-03-30 19:07:16 +020030#include <asm/nmi.h>
Ingo Molnar241771e2008-12-03 10:39:53 +010031
Peter Zijlstra7645a242010-03-08 13:51:31 +010032#if 0
33#undef wrmsrl
34#define wrmsrl(msr, val) \
35do { \
36 trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
37 (unsigned long)(val)); \
38 native_write_msr((msr), (u32)((u64)(val)), \
39 (u32)((u64)(val) >> 32)); \
40} while (0)
41#endif
42
Peter Zijlstraef21f682010-03-03 13:12:23 +010043/*
44 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
45 */
46static unsigned long
47copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
48{
49 unsigned long offset, addr = (unsigned long)from;
50 int type = in_nmi() ? KM_NMI : KM_IRQ0;
51 unsigned long size, len = 0;
52 struct page *page;
53 void *map;
54 int ret;
55
56 do {
57 ret = __get_user_pages_fast(addr, 1, 0, &page);
58 if (!ret)
59 break;
60
61 offset = addr & (PAGE_SIZE - 1);
62 size = min(PAGE_SIZE - offset, n - len);
63
64 map = kmap_atomic(page, type);
65 memcpy(to, map+offset, size);
66 kunmap_atomic(map, type);
67 put_page(page);
68
69 len += size;
70 to += size;
71 addr += size;
72
73 } while (len < n);
74
75 return len;
76}
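/*
 * Usage sketch for the helper above: it returns the number of bytes it
 * managed to copy, which may be less than 'n' when a user page is not
 * present, because __get_user_pages_fast() never sleeps or takes faults.
 * A caller (e.g. a user-stack unwinder running from the NMI handler)
 * would typically treat a short copy as the point to stop walking.
 */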
77
Stephane Eranian1da53e02010-01-18 10:58:01 +020078struct event_constraint {
Peter Zijlstrac91e0f52010-01-22 15:25:59 +010079 union {
80 unsigned long idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
Peter Zijlstrab622d642010-02-01 15:36:30 +010081 u64 idxmsk64;
Peter Zijlstrac91e0f52010-01-22 15:25:59 +010082 };
Peter Zijlstrab622d642010-02-01 15:36:30 +010083 u64 code;
84 u64 cmask;
Peter Zijlstra272d30b2010-01-22 16:32:17 +010085 int weight;
Stephane Eranian1da53e02010-01-18 10:58:01 +020086};
87
Stephane Eranian38331f62010-02-08 17:17:01 +020088struct amd_nb {
89 int nb_id; /* NorthBridge id */
90 int refcnt; /* reference count */
91 struct perf_event *owners[X86_PMC_IDX_MAX];
92 struct event_constraint event_constraints[X86_PMC_IDX_MAX];
93};
94
Peter Zijlstracaff2be2010-03-03 12:02:30 +010095#define MAX_LBR_ENTRIES 16
96
Ingo Molnarcdd6c482009-09-21 12:02:48 +020097struct cpu_hw_events {
Peter Zijlstraca037702010-03-02 19:52:12 +010098 /*
99 * Generic x86 PMC bits
100 */
Stephane Eranian1da53e02010-01-18 10:58:01 +0200101 struct perf_event *events[X86_PMC_IDX_MAX]; /* in counter order */
Robert Richter43f62012009-04-29 16:55:56 +0200102 unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100103 int enabled;
Stephane Eranian1da53e02010-01-18 10:58:01 +0200104
105 int n_events;
106 int n_added;
107 int assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
Stephane Eranian447a1942010-02-01 14:50:01 +0200108 u64 tags[X86_PMC_IDX_MAX];
Stephane Eranian1da53e02010-01-18 10:58:01 +0200109 struct perf_event *event_list[X86_PMC_IDX_MAX]; /* in enabled order */
Peter Zijlstraca037702010-03-02 19:52:12 +0100110
111 /*
112 * Intel DebugStore bits
113 */
114 struct debug_store *ds;
115 u64 pebs_enabled;
116
117 /*
Peter Zijlstracaff2be2010-03-03 12:02:30 +0100118 * Intel LBR bits
119 */
120 int lbr_users;
121 void *lbr_context;
122 struct perf_branch_stack lbr_stack;
123 struct perf_branch_entry lbr_entries[MAX_LBR_ENTRIES];
124
125 /*
Peter Zijlstraca037702010-03-02 19:52:12 +0100126 * AMD specific bits
127 */
Stephane Eranian38331f62010-02-08 17:17:01 +0200128 struct amd_nb *amd_nb;
Ingo Molnar241771e2008-12-03 10:39:53 +0100129};
130
Peter Zijlstrafce877e2010-01-29 13:25:12 +0100131#define __EVENT_CONSTRAINT(c, n, m, w) {\
Peter Zijlstrab622d642010-02-01 15:36:30 +0100132 { .idxmsk64 = (n) }, \
Peter Zijlstrac91e0f52010-01-22 15:25:59 +0100133 .code = (c), \
134 .cmask = (m), \
Peter Zijlstrafce877e2010-01-29 13:25:12 +0100135 .weight = (w), \
Peter Zijlstrac91e0f52010-01-22 15:25:59 +0100136}
Stephane Eranianb6900812009-10-06 16:42:09 +0200137
Peter Zijlstrafce877e2010-01-29 13:25:12 +0100138#define EVENT_CONSTRAINT(c, n, m) \
139 __EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))
140
Peter Zijlstraca037702010-03-02 19:52:12 +0100141/*
142 * Constraint on the Event code.
143 */
Peter Zijlstraed8777f2010-01-27 23:07:46 +0100144#define INTEL_EVENT_CONSTRAINT(c, n) \
145 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)
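/*
 * Illustrative expansion: INTEL_EVENT_CONSTRAINT(0xc0, 0x3) yields a
 * constraint with .code = 0xc0, .cmask = INTEL_ARCH_EVTSEL_MASK,
 * .idxmsk64 = 0x3 and .weight = 2, i.e. the event may only be placed
 * on generic counters 0 or 1.
 */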
Peter Zijlstra8433be12010-01-22 15:38:26 +0100146
Peter Zijlstraca037702010-03-02 19:52:12 +0100147/*
148 * Constraint on the Event code + UMask + fixed-mask
149 */
Peter Zijlstraed8777f2010-01-27 23:07:46 +0100150#define FIXED_EVENT_CONSTRAINT(c, n) \
Peter Zijlstrab622d642010-02-01 15:36:30 +0100151 EVENT_CONSTRAINT(c, (1ULL << (32+n)), INTEL_ARCH_FIXED_MASK)
Peter Zijlstra8433be12010-01-22 15:38:26 +0100152
Peter Zijlstraca037702010-03-02 19:52:12 +0100153/*
154 * Constraint on the Event code + UMask
155 */
156#define PEBS_EVENT_CONSTRAINT(c, n) \
157 EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)
158
Peter Zijlstraed8777f2010-01-27 23:07:46 +0100159#define EVENT_CONSTRAINT_END \
160 EVENT_CONSTRAINT(0, 0, 0)
161
162#define for_each_event_constraint(e, c) \
163 for ((e) = (c); (e)->cmask; (e)++)
Stephane Eranianb6900812009-10-06 16:42:09 +0200164
Peter Zijlstra8db909a2010-03-03 17:07:40 +0100165union perf_capabilities {
166 struct {
167 u64 lbr_format : 6;
168 u64 pebs_trap : 1;
169 u64 pebs_arch_reg : 1;
170 u64 pebs_format : 4;
171 u64 smm_freeze : 1;
172 };
173 u64 capabilities;
174};
175
Ingo Molnar241771e2008-12-03 10:39:53 +0100176/*
Robert Richter5f4ec282009-04-29 12:47:04 +0200177 * struct x86_pmu - generic x86 pmu
Ingo Molnar241771e2008-12-03 10:39:53 +0100178 */
Robert Richter5f4ec282009-04-29 12:47:04 +0200179struct x86_pmu {
Peter Zijlstraca037702010-03-02 19:52:12 +0100180 /*
181 * Generic x86 PMC bits
182 */
Robert Richterfaa28ae2009-04-29 12:47:13 +0200183 const char *name;
184 int version;
Yong Wanga3288102009-06-03 13:12:55 +0800185 int (*handle_irq)(struct pt_regs *);
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200186 void (*disable_all)(void);
187 void (*enable_all)(void);
Peter Zijlstraaff3d912010-03-02 20:32:08 +0100188 void (*enable)(struct perf_event *);
189 void (*disable)(struct perf_event *);
Cyrill Gorcunova0727382010-03-11 19:54:39 +0300190 int (*hw_config)(struct perf_event_attr *attr, struct hw_perf_event *hwc);
191 int (*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
Jaswinder Singh Rajput169e41e2009-02-28 18:37:49 +0530192 unsigned eventsel;
193 unsigned perfctr;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100194 u64 (*event_map)(int);
195 u64 (*raw_event)(u64);
Jaswinder Singh Rajput169e41e2009-02-28 18:37:49 +0530196 int max_events;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200197 int num_events;
198 int num_events_fixed;
199 int event_bits;
200 u64 event_mask;
Ingo Molnar04da8a42009-08-11 10:40:08 +0200201 int apic;
Robert Richterc619b8f2009-04-29 12:47:23 +0200202 u64 max_period;
Peter Zijlstra63b14642010-01-22 16:32:17 +0100203 struct event_constraint *
204 (*get_event_constraints)(struct cpu_hw_events *cpuc,
205 struct perf_event *event);
206
Peter Zijlstrac91e0f52010-01-22 15:25:59 +0100207 void (*put_event_constraints)(struct cpu_hw_events *cpuc,
208 struct perf_event *event);
Peter Zijlstra63b14642010-01-22 16:32:17 +0100209 struct event_constraint *event_constraints;
Peter Zijlstra3c447802010-03-04 21:49:01 +0100210 void (*quirks)(void);
Peter Zijlstra3f6da392010-03-05 13:01:18 +0100211
212 void (*cpu_prepare)(int cpu);
213 void (*cpu_starting)(int cpu);
214 void (*cpu_dying)(int cpu);
215 void (*cpu_dead)(int cpu);
Peter Zijlstraca037702010-03-02 19:52:12 +0100216
217 /*
218 * Intel Arch Perfmon v2+
219 */
Peter Zijlstra8db909a2010-03-03 17:07:40 +0100220 u64 intel_ctrl;
221 union perf_capabilities intel_cap;
Peter Zijlstraca037702010-03-02 19:52:12 +0100222
223 /*
224 * Intel DebugStore bits
225 */
226 int bts, pebs;
227 int pebs_record_size;
228 void (*drain_pebs)(struct pt_regs *regs);
229 struct event_constraint *pebs_constraints;
Peter Zijlstracaff2be2010-03-03 12:02:30 +0100230
231 /*
232 * Intel LBR
233 */
234 unsigned long lbr_tos, lbr_from, lbr_to; /* MSR base regs */
235 int lbr_nr; /* hardware stack size */
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530236};
237
Robert Richter4a06bd82009-04-29 12:47:11 +0200238static struct x86_pmu x86_pmu __read_mostly;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530239
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200240static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100241 .enabled = 1,
242};
Ingo Molnar241771e2008-12-03 10:39:53 +0100243
Peter Zijlstra07088ed2010-03-02 20:16:01 +0100244static int x86_perf_event_set_period(struct perf_event *event);
Stephane Eranianb6900812009-10-06 16:42:09 +0200245
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530246/*
Ingo Molnardfc65092009-09-21 11:31:35 +0200247 * Generalized hw caching related hw_event table, filled
Ingo Molnar8326f442009-06-05 20:22:46 +0200248 * in on a per model basis. A value of 0 means
Ingo Molnardfc65092009-09-21 11:31:35 +0200249 * 'not supported', -1 means 'hw_event makes no sense on
250 * this CPU', any other value means the raw hw_event
Ingo Molnar8326f442009-06-05 20:22:46 +0200251 * ID.
252 */
253
254#define C(x) PERF_COUNT_HW_CACHE_##x
255
256static u64 __read_mostly hw_cache_event_ids
257 [PERF_COUNT_HW_CACHE_MAX]
258 [PERF_COUNT_HW_CACHE_OP_MAX]
259 [PERF_COUNT_HW_CACHE_RESULT_MAX];
260
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530261/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200262 * Propagate event elapsed time into the generic event.
263 * Can only be executed on the CPU where the event is active.
Ingo Molnaree060942008-12-13 09:00:03 +0100264 * Returns the delta events processed.
265 */
Robert Richter4b7bfd02009-04-29 12:47:22 +0200266static u64
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +0100267x86_perf_event_update(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +0100268{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +0100269 struct hw_perf_event *hwc = &event->hw;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200270 int shift = 64 - x86_pmu.event_bits;
Peter Zijlstraec3232b2009-05-13 09:45:19 +0200271 u64 prev_raw_count, new_raw_count;
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +0100272 int idx = hwc->idx;
Peter Zijlstraec3232b2009-05-13 09:45:19 +0200273 s64 delta;
Ingo Molnaree060942008-12-13 09:00:03 +0100274
Markus Metzger30dd5682009-07-21 15:56:48 +0200275 if (idx == X86_PMC_IDX_FIXED_BTS)
276 return 0;
277
Ingo Molnaree060942008-12-13 09:00:03 +0100278 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200279 * Careful: an NMI might modify the previous event value.
Ingo Molnaree060942008-12-13 09:00:03 +0100280 *
281 * Our tactic to handle this is to first atomically read and
282 * exchange a new raw count - then add that new-prev delta
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200283 * count to the generic event atomically:
Ingo Molnaree060942008-12-13 09:00:03 +0100284 */
285again:
286 prev_raw_count = atomic64_read(&hwc->prev_count);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200287 rdmsrl(hwc->event_base + idx, new_raw_count);
Ingo Molnaree060942008-12-13 09:00:03 +0100288
289 if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
290 new_raw_count) != prev_raw_count)
291 goto again;
292
293 /*
294 * Now we have the new raw value and have updated the prev
295 * timestamp already. We can now calculate the elapsed delta
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200296 * (event-)time and add that to the generic event.
Ingo Molnaree060942008-12-13 09:00:03 +0100297 *
298 * Careful, not all hw sign-extends above the physical width
Peter Zijlstraec3232b2009-05-13 09:45:19 +0200299 * of the count.
Ingo Molnaree060942008-12-13 09:00:03 +0100300 */
Peter Zijlstraec3232b2009-05-13 09:45:19 +0200301 delta = (new_raw_count << shift) - (prev_raw_count << shift);
302 delta >>= shift;
Ingo Molnaree060942008-12-13 09:00:03 +0100303
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200304 atomic64_add(delta, &event->count);
Ingo Molnaree060942008-12-13 09:00:03 +0100305 atomic64_sub(delta, &hwc->period_left);
Robert Richter4b7bfd02009-04-29 12:47:22 +0200306
307 return new_raw_count;
Ingo Molnaree060942008-12-13 09:00:03 +0100308}
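/*
 * Rough arithmetic sketch of the shift trick above, assuming 48-bit wide
 * counters (shift = 16): if prev_raw_count was 0xffffffffffff and the
 * counter wrapped so that new_raw_count reads 0x5, then
 * (0x5 << 16) - (0xffffffffffff << 16) == 0x60000 and the arithmetic
 * shift right by 16 gives delta = 6, the number of events that occurred.
 */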
309
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200310static atomic_t active_events;
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200311static DEFINE_MUTEX(pmc_reserve_mutex);
312
Robert Richterb27ea292010-03-17 12:49:10 +0100313#ifdef CONFIG_X86_LOCAL_APIC
314
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200315static bool reserve_pmc_hardware(void)
316{
317 int i;
318
319 if (nmi_watchdog == NMI_LOCAL_APIC)
320 disable_lapic_nmi_watchdog();
321
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200322 for (i = 0; i < x86_pmu.num_events; i++) {
Robert Richter4a06bd82009-04-29 12:47:11 +0200323 if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200324 goto perfctr_fail;
325 }
326
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200327 for (i = 0; i < x86_pmu.num_events; i++) {
Robert Richter4a06bd82009-04-29 12:47:11 +0200328 if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200329 goto eventsel_fail;
330 }
331
332 return true;
333
334eventsel_fail:
335 for (i--; i >= 0; i--)
Robert Richter4a06bd82009-04-29 12:47:11 +0200336 release_evntsel_nmi(x86_pmu.eventsel + i);
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200337
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200338 i = x86_pmu.num_events;
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200339
340perfctr_fail:
341 for (i--; i >= 0; i--)
Robert Richter4a06bd82009-04-29 12:47:11 +0200342 release_perfctr_nmi(x86_pmu.perfctr + i);
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200343
344 if (nmi_watchdog == NMI_LOCAL_APIC)
345 enable_lapic_nmi_watchdog();
346
347 return false;
348}
349
350static void release_pmc_hardware(void)
351{
352 int i;
353
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200354 for (i = 0; i < x86_pmu.num_events; i++) {
Robert Richter4a06bd82009-04-29 12:47:11 +0200355 release_perfctr_nmi(x86_pmu.perfctr + i);
356 release_evntsel_nmi(x86_pmu.eventsel + i);
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200357 }
358
359 if (nmi_watchdog == NMI_LOCAL_APIC)
360 enable_lapic_nmi_watchdog();
361}
362
Robert Richterb27ea292010-03-17 12:49:10 +0100363#else
364
365static bool reserve_pmc_hardware(void) { return true; }
366static void release_pmc_hardware(void) {}
367
368#endif
369
Peter Zijlstraca037702010-03-02 19:52:12 +0100370static int reserve_ds_buffers(void);
371static void release_ds_buffers(void);
Markus Metzger30dd5682009-07-21 15:56:48 +0200372
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200373static void hw_perf_event_destroy(struct perf_event *event)
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200374{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200375 if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200376 release_pmc_hardware();
Peter Zijlstraca037702010-03-02 19:52:12 +0100377 release_ds_buffers();
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200378 mutex_unlock(&pmc_reserve_mutex);
379 }
380}
381
Robert Richter85cf9db2009-04-29 12:47:20 +0200382static inline int x86_pmu_initialized(void)
383{
384 return x86_pmu.handle_irq != NULL;
385}
386
Ingo Molnar8326f442009-06-05 20:22:46 +0200387static inline int
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200388set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
Ingo Molnar8326f442009-06-05 20:22:46 +0200389{
390 unsigned int cache_type, cache_op, cache_result;
391 u64 config, val;
392
393 config = attr->config;
394
395 cache_type = (config >> 0) & 0xff;
396 if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
397 return -EINVAL;
398
399 cache_op = (config >> 8) & 0xff;
400 if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
401 return -EINVAL;
402
403 cache_result = (config >> 16) & 0xff;
404 if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
405 return -EINVAL;
406
407 val = hw_cache_event_ids[cache_type][cache_op][cache_result];
408
409 if (val == 0)
410 return -ENOENT;
411
412 if (val == -1)
413 return -EINVAL;
414
415 hwc->config |= val;
416
417 return 0;
418}
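/*
 * Example of the attr->config layout decoded above: an L1 data-cache
 * read miss is encoded as
 *	PERF_COUNT_HW_CACHE_L1D |
 *	(PERF_COUNT_HW_CACHE_OP_READ << 8) |
 *	(PERF_COUNT_HW_CACHE_RESULT_MISS << 16)
 * and is then looked up in hw_cache_event_ids[] to obtain the
 * model-specific raw event.
 */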
419
Cyrill Gorcunova0727382010-03-11 19:54:39 +0300420static int x86_hw_config(struct perf_event_attr *attr, struct hw_perf_event *hwc)
421{
422 /*
423 * Generate PMC IRQs:
424 * (keep 'enabled' bit clear for now)
425 */
426 hwc->config = ARCH_PERFMON_EVENTSEL_INT;
427
428 /*
429 * Count user and OS events unless requested not to
430 */
431 if (!attr->exclude_user)
432 hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
433 if (!attr->exclude_kernel)
434 hwc->config |= ARCH_PERFMON_EVENTSEL_OS;
435
436 return 0;
437}
438
Ingo Molnaree060942008-12-13 09:00:03 +0100439/*
Peter Zijlstra0d486962009-06-02 19:22:16 +0200440 * Setup the hardware configuration for a given attr_type
Ingo Molnar241771e2008-12-03 10:39:53 +0100441 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200442static int __hw_perf_event_init(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +0100443{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200444 struct perf_event_attr *attr = &event->attr;
445 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstra9c74fb52009-07-08 10:21:41 +0200446 u64 config;
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200447 int err;
Ingo Molnar241771e2008-12-03 10:39:53 +0100448
Robert Richter85cf9db2009-04-29 12:47:20 +0200449 if (!x86_pmu_initialized())
450 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +0100451
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200452 err = 0;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200453 if (!atomic_inc_not_zero(&active_events)) {
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200454 mutex_lock(&pmc_reserve_mutex);
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200455 if (atomic_read(&active_events) == 0) {
Markus Metzger30dd5682009-07-21 15:56:48 +0200456 if (!reserve_pmc_hardware())
457 err = -EBUSY;
458 else
Peter Zijlstraca037702010-03-02 19:52:12 +0100459 err = reserve_ds_buffers();
Markus Metzger30dd5682009-07-21 15:56:48 +0200460 }
461 if (!err)
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200462 atomic_inc(&active_events);
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200463 mutex_unlock(&pmc_reserve_mutex);
464 }
465 if (err)
466 return err;
467
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200468 event->destroy = hw_perf_event_destroy;
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +0200469
Stephane Eranianb6900812009-10-06 16:42:09 +0200470 hwc->idx = -1;
Stephane Eranian447a1942010-02-01 14:50:01 +0200471 hwc->last_cpu = -1;
472 hwc->last_tag = ~0ULL;
Stephane Eranianb6900812009-10-06 16:42:09 +0200473
Cyrill Gorcunova0727382010-03-11 19:54:39 +0300474 /* Processor specifics */
Robert Richter984763c2010-03-16 17:07:33 +0100475 err = x86_pmu.hw_config(attr, hwc);
476 if (err)
477 return err;
Paul Mackerras0475f9e2009-02-11 14:35:35 +1100478
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +0200479 if (!hwc->sample_period) {
Peter Zijlstrab23f3322009-06-02 15:13:03 +0200480 hwc->sample_period = x86_pmu.max_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +0200481 hwc->last_period = hwc->sample_period;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +0200482 atomic64_set(&hwc->period_left, hwc->sample_period);
Ingo Molnar04da8a42009-08-11 10:40:08 +0200483 } else {
484 /*
485 * If we have a PMU initialized but no APIC
486 * interrupts, we cannot sample hardware
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200487 * events (user-space has to fall back and
488 * sample via a hrtimer based software event):
Ingo Molnar04da8a42009-08-11 10:40:08 +0200489 */
490 if (!x86_pmu.apic)
491 return -EOPNOTSUPP;
Peter Zijlstrabd2b5b12009-06-10 13:40:57 +0200492 }
Ingo Molnard2517a42009-05-17 10:04:45 +0200493
Ingo Molnar241771e2008-12-03 10:39:53 +0100494 /*
Ingo Molnardfc65092009-09-21 11:31:35 +0200495 * Raw hw_event type provide the config in the hw_event structure
Ingo Molnar241771e2008-12-03 10:39:53 +0100496 */
Ingo Molnara21ca2c2009-06-06 09:58:57 +0200497 if (attr->type == PERF_TYPE_RAW) {
498 hwc->config |= x86_pmu.raw_event(attr->config);
Peter Zijlstra320ebf02010-03-02 12:35:37 +0100499 if ((hwc->config & ARCH_PERFMON_EVENTSEL_ANY) &&
500 perf_paranoid_cpu() && !capable(CAP_SYS_ADMIN))
501 return -EACCES;
Ingo Molnar8326f442009-06-05 20:22:46 +0200502 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +0100503 }
Ingo Molnar241771e2008-12-03 10:39:53 +0100504
Ingo Molnar8326f442009-06-05 20:22:46 +0200505 if (attr->type == PERF_TYPE_HW_CACHE)
506 return set_ext_hw_attr(hwc, attr);
507
508 if (attr->config >= x86_pmu.max_events)
509 return -EINVAL;
Peter Zijlstra9c74fb52009-07-08 10:21:41 +0200510
Ingo Molnar8326f442009-06-05 20:22:46 +0200511 /*
512 * The generic map:
513 */
Peter Zijlstra9c74fb52009-07-08 10:21:41 +0200514 config = x86_pmu.event_map(attr->config);
515
516 if (config == 0)
517 return -ENOENT;
518
519 if (config == -1LL)
520 return -EINVAL;
521
markus.t.metzger@intel.com747b50a2009-09-02 16:04:46 +0200522 /*
523 * Branch tracing:
524 */
525 if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
markus.t.metzger@intel.com16531922009-09-02 16:04:48 +0200526 (hwc->sample_period == 1)) {
527 /* BTS is not supported by this architecture. */
Peter Zijlstraca037702010-03-02 19:52:12 +0100528 if (!x86_pmu.bts)
markus.t.metzger@intel.com16531922009-09-02 16:04:48 +0200529 return -EOPNOTSUPP;
530
531 /* BTS is currently only allowed for user-mode. */
Cyrill Gorcunova0727382010-03-11 19:54:39 +0300532 if (!attr->exclude_kernel)
markus.t.metzger@intel.com16531922009-09-02 16:04:48 +0200533 return -EOPNOTSUPP;
534 }
markus.t.metzger@intel.com747b50a2009-09-02 16:04:46 +0200535
Peter Zijlstra9c74fb52009-07-08 10:21:41 +0200536 hwc->config |= config;
Peter Zijlstra4e935e42009-03-30 19:07:16 +0200537
Ingo Molnar241771e2008-12-03 10:39:53 +0100538 return 0;
539}
540
Peter Zijlstra8c48e442010-01-29 13:25:31 +0100541static void x86_pmu_disable_all(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530542{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200543 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200544 int idx;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100545
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200546 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100547 u64 val;
548
Robert Richter43f62012009-04-29 16:55:56 +0200549 if (!test_bit(idx, cpuc->active_mask))
Robert Richter4295ee62009-04-29 12:47:01 +0200550 continue;
Peter Zijlstra8c48e442010-01-29 13:25:31 +0100551 rdmsrl(x86_pmu.eventsel + idx, val);
Robert Richterbb1165d2010-03-01 14:21:23 +0100552 if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
Robert Richter4295ee62009-04-29 12:47:01 +0200553 continue;
Robert Richterbb1165d2010-03-01 14:21:23 +0100554 val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
Peter Zijlstra8c48e442010-01-29 13:25:31 +0100555 wrmsrl(x86_pmu.eventsel + idx, val);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530556 }
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530557}
558
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200559void hw_perf_disable(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530560{
Stephane Eranian1da53e02010-01-18 10:58:01 +0200561 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
562
Robert Richter85cf9db2009-04-29 12:47:20 +0200563 if (!x86_pmu_initialized())
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200564 return;
Stephane Eranian1da53e02010-01-18 10:58:01 +0200565
Peter Zijlstra1a6e21f2010-01-27 23:07:47 +0100566 if (!cpuc->enabled)
567 return;
568
569 cpuc->n_added = 0;
570 cpuc->enabled = 0;
571 barrier();
Stephane Eranian1da53e02010-01-18 10:58:01 +0200572
573 x86_pmu.disable_all();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +0530574}
Ingo Molnar241771e2008-12-03 10:39:53 +0100575
Peter Zijlstra8c48e442010-01-29 13:25:31 +0100576static void x86_pmu_enable_all(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530577{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200578 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530579 int idx;
580
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200581 for (idx = 0; idx < x86_pmu.num_events; idx++) {
582 struct perf_event *event = cpuc->events[idx];
Robert Richter4295ee62009-04-29 12:47:01 +0200583 u64 val;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100584
Robert Richter43f62012009-04-29 16:55:56 +0200585 if (!test_bit(idx, cpuc->active_mask))
Robert Richter4295ee62009-04-29 12:47:01 +0200586 continue;
Peter Zijlstra984b8382009-07-10 09:59:56 +0200587
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200588 val = event->hw.config;
Robert Richterbb1165d2010-03-01 14:21:23 +0100589 val |= ARCH_PERFMON_EVENTSEL_ENABLE;
Peter Zijlstra8c48e442010-01-29 13:25:31 +0100590 wrmsrl(x86_pmu.eventsel + idx, val);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +0530591 }
592}
593
Stephane Eranian1da53e02010-01-18 10:58:01 +0200594static const struct pmu pmu;
595
596static inline int is_x86_event(struct perf_event *event)
597{
598 return event->pmu == &pmu;
599}
600
601static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
602{
Peter Zijlstra63b14642010-01-22 16:32:17 +0100603 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
Stephane Eranian1da53e02010-01-18 10:58:01 +0200604 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
Peter Zijlstrac933c1a2010-01-22 16:40:12 +0100605 int i, j, w, wmax, num = 0;
Stephane Eranian1da53e02010-01-18 10:58:01 +0200606 struct hw_perf_event *hwc;
607
608 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
609
610 for (i = 0; i < n; i++) {
Peter Zijlstrab622d642010-02-01 15:36:30 +0100611 c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
612 constraints[i] = c;
Stephane Eranian1da53e02010-01-18 10:58:01 +0200613 }
614
615 /*
Stephane Eranian81130702010-01-21 17:39:01 +0200616 * fastpath, try to reuse previous register
617 */
Peter Zijlstrac933c1a2010-01-22 16:40:12 +0100618 for (i = 0; i < n; i++) {
Stephane Eranian81130702010-01-21 17:39:01 +0200619 hwc = &cpuc->event_list[i]->hw;
Peter Zijlstra81269a02010-01-22 14:55:22 +0100620 c = constraints[i];
Stephane Eranian81130702010-01-21 17:39:01 +0200621
622 /* never assigned */
623 if (hwc->idx == -1)
624 break;
625
626 /* constraint still honored */
Peter Zijlstra63b14642010-01-22 16:32:17 +0100627 if (!test_bit(hwc->idx, c->idxmsk))
Stephane Eranian81130702010-01-21 17:39:01 +0200628 break;
629
630 /* not already used */
631 if (test_bit(hwc->idx, used_mask))
632 break;
633
Peter Zijlstra34538ee2010-03-02 21:16:55 +0100634 __set_bit(hwc->idx, used_mask);
Stephane Eranian81130702010-01-21 17:39:01 +0200635 if (assign)
636 assign[i] = hwc->idx;
637 }
Peter Zijlstrac933c1a2010-01-22 16:40:12 +0100638 if (i == n)
Stephane Eranian81130702010-01-21 17:39:01 +0200639 goto done;
640
641 /*
642 * begin slow path
643 */
644
645 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
646
647 /*
Stephane Eranian1da53e02010-01-18 10:58:01 +0200648 * weight = number of possible counters
649 *
650 * 1 = most constrained, only works on one counter
651 * wmax = least constrained, works on any counter
652 *
653 * assign events to counters starting with most
654 * constrained events.
655 */
656 wmax = x86_pmu.num_events;
657
658 /*
659 * when fixed event counters are present,
660 * wmax is incremented by 1 to account
661 * for one more choice
662 */
663 if (x86_pmu.num_events_fixed)
664 wmax++;
665
Stephane Eranian81130702010-01-21 17:39:01 +0200666 for (w = 1, num = n; num && w <= wmax; w++) {
Stephane Eranian1da53e02010-01-18 10:58:01 +0200667 /* for each event */
Stephane Eranian81130702010-01-21 17:39:01 +0200668 for (i = 0; num && i < n; i++) {
Peter Zijlstra81269a02010-01-22 14:55:22 +0100669 c = constraints[i];
Stephane Eranian1da53e02010-01-18 10:58:01 +0200670 hwc = &cpuc->event_list[i]->hw;
671
Peter Zijlstra272d30b2010-01-22 16:32:17 +0100672 if (c->weight != w)
Stephane Eranian1da53e02010-01-18 10:58:01 +0200673 continue;
674
Akinobu Mita984b3f52010-03-05 13:41:37 -0800675 for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
Stephane Eranian1da53e02010-01-18 10:58:01 +0200676 if (!test_bit(j, used_mask))
677 break;
678 }
679
680 if (j == X86_PMC_IDX_MAX)
681 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +0200682
Peter Zijlstra34538ee2010-03-02 21:16:55 +0100683 __set_bit(j, used_mask);
Stephane Eranian81130702010-01-21 17:39:01 +0200684
Stephane Eranian1da53e02010-01-18 10:58:01 +0200685 if (assign)
686 assign[i] = j;
687 num--;
688 }
689 }
Stephane Eranian81130702010-01-21 17:39:01 +0200690done:
Stephane Eranian1da53e02010-01-18 10:58:01 +0200691 /*
692 * scheduling failed or is just a simulation,
693 * free resources if necessary
694 */
695 if (!assign || num) {
696 for (i = 0; i < n; i++) {
697 if (x86_pmu.put_event_constraints)
698 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
699 }
700 }
701 return num ? -ENOSPC : 0;
702}
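/*
 * Sketch of the weight-ordered pass above: suppose event A may only use
 * counter 0 (weight 1) while event B may use counters 0-3 (weight 4).
 * The w == 1 pass places A on counter 0 first; a later pass then places
 * B on the lowest free counter, counter 1. Scheduling the less
 * constrained event first could instead have left A with no counter.
 */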
703
704/*
705 * dogrp: true if sibling events (the whole group) must be collected
706 * returns the total number of collected events, or a negative error code
707 */
708static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
709{
710 struct perf_event *event;
711 int n, max_count;
712
713 max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
714
715 /* current number of events already accepted */
716 n = cpuc->n_events;
717
718 if (is_x86_event(leader)) {
719 if (n >= max_count)
720 return -ENOSPC;
721 cpuc->event_list[n] = leader;
722 n++;
723 }
724 if (!dogrp)
725 return n;
726
727 list_for_each_entry(event, &leader->sibling_list, group_entry) {
728 if (!is_x86_event(event) ||
Stephane Eranian81130702010-01-21 17:39:01 +0200729 event->state <= PERF_EVENT_STATE_OFF)
Stephane Eranian1da53e02010-01-18 10:58:01 +0200730 continue;
731
732 if (n >= max_count)
733 return -ENOSPC;
734
735 cpuc->event_list[n] = event;
736 n++;
737 }
738 return n;
739}
740
Stephane Eranian1da53e02010-01-18 10:58:01 +0200741static inline void x86_assign_hw_event(struct perf_event *event,
Stephane Eranian447a1942010-02-01 14:50:01 +0200742 struct cpu_hw_events *cpuc, int i)
Stephane Eranian1da53e02010-01-18 10:58:01 +0200743{
Stephane Eranian447a1942010-02-01 14:50:01 +0200744 struct hw_perf_event *hwc = &event->hw;
745
746 hwc->idx = cpuc->assign[i];
747 hwc->last_cpu = smp_processor_id();
748 hwc->last_tag = ++cpuc->tags[i];
Stephane Eranian1da53e02010-01-18 10:58:01 +0200749
750 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
751 hwc->config_base = 0;
752 hwc->event_base = 0;
753 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
754 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
755 /*
756 * We set it so that event_base + idx in wrmsr/rdmsr maps to
757 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
758 */
759 hwc->event_base =
760 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
761 } else {
762 hwc->config_base = x86_pmu.eventsel;
763 hwc->event_base = x86_pmu.perfctr;
764 }
765}
766
Stephane Eranian447a1942010-02-01 14:50:01 +0200767static inline int match_prev_assignment(struct hw_perf_event *hwc,
768 struct cpu_hw_events *cpuc,
769 int i)
770{
771 return hwc->idx == cpuc->assign[i] &&
772 hwc->last_cpu == smp_processor_id() &&
773 hwc->last_tag == cpuc->tags[i];
774}
775
Peter Zijlstrac08053e2010-03-06 13:19:24 +0100776static int x86_pmu_start(struct perf_event *event);
Stephane Eraniand76a0812010-02-08 17:06:01 +0200777static void x86_pmu_stop(struct perf_event *event);
Peter Zijlstra2e841872010-01-25 15:58:43 +0100778
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200779void hw_perf_enable(void)
Ingo Molnaree060942008-12-13 09:00:03 +0100780{
Stephane Eranian1da53e02010-01-18 10:58:01 +0200781 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
782 struct perf_event *event;
783 struct hw_perf_event *hwc;
784 int i;
785
Robert Richter85cf9db2009-04-29 12:47:20 +0200786 if (!x86_pmu_initialized())
Ingo Molnar2b9ff0d2008-12-14 18:36:30 +0100787 return;
Peter Zijlstra1a6e21f2010-01-27 23:07:47 +0100788
789 if (cpuc->enabled)
790 return;
791
Stephane Eranian1da53e02010-01-18 10:58:01 +0200792 if (cpuc->n_added) {
Peter Zijlstra19925ce2010-03-06 13:20:40 +0100793 int n_running = cpuc->n_events - cpuc->n_added;
Stephane Eranian1da53e02010-01-18 10:58:01 +0200794 /*
795 * apply assignment obtained either from
796 * hw_perf_group_sched_in() or x86_pmu_enable()
797 *
798 * step1: save events moving to new counters
799 * step2: reprogram moved events into new counters
800 */
Peter Zijlstra19925ce2010-03-06 13:20:40 +0100801 for (i = 0; i < n_running; i++) {
Stephane Eranian1da53e02010-01-18 10:58:01 +0200802 event = cpuc->event_list[i];
803 hwc = &event->hw;
804
Stephane Eranian447a1942010-02-01 14:50:01 +0200805 /*
806 * we can avoid reprogramming counter if:
807 * - assigned same counter as last time
808 * - running on same CPU as last time
809 * - no other event has used the counter since
810 */
811 if (hwc->idx == -1 ||
812 match_prev_assignment(hwc, cpuc, i))
Stephane Eranian1da53e02010-01-18 10:58:01 +0200813 continue;
814
Stephane Eraniand76a0812010-02-08 17:06:01 +0200815 x86_pmu_stop(event);
Stephane Eranian1da53e02010-01-18 10:58:01 +0200816 }
817
818 for (i = 0; i < cpuc->n_events; i++) {
Stephane Eranian1da53e02010-01-18 10:58:01 +0200819 event = cpuc->event_list[i];
820 hwc = &event->hw;
821
Peter Zijlstra45e16a62010-03-11 13:40:30 +0100822 if (!match_prev_assignment(hwc, cpuc, i))
Stephane Eranian447a1942010-02-01 14:50:01 +0200823 x86_assign_hw_event(event, cpuc, i);
Peter Zijlstra45e16a62010-03-11 13:40:30 +0100824 else if (i < n_running)
825 continue;
Stephane Eranian1da53e02010-01-18 10:58:01 +0200826
Peter Zijlstrac08053e2010-03-06 13:19:24 +0100827 x86_pmu_start(event);
Stephane Eranian1da53e02010-01-18 10:58:01 +0200828 }
829 cpuc->n_added = 0;
830 perf_events_lapic_init();
831 }
Peter Zijlstra1a6e21f2010-01-27 23:07:47 +0100832
833 cpuc->enabled = 1;
834 barrier();
835
Peter Zijlstra9e35ad32009-05-13 16:21:38 +0200836 x86_pmu.enable_all();
Ingo Molnaree060942008-12-13 09:00:03 +0100837}
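/*
 * Example of the two-step reprogramming above (roughly): if an event was
 * running on counter 2 but the new assignment moves it to counter 0,
 * step 1 stops it on counter 2 before step 2 restarts it on counter 0,
 * so a counter is always freed before another event is moved onto it.
 */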
Ingo Molnaree060942008-12-13 09:00:03 +0100838
Peter Zijlstraaff3d912010-03-02 20:32:08 +0100839static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100840{
Peter Zijlstra7645a242010-03-08 13:51:31 +0100841 wrmsrl(hwc->config_base + hwc->idx,
Robert Richterbb1165d2010-03-01 14:21:23 +0100842 hwc->config | ARCH_PERFMON_EVENTSEL_ENABLE);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100843}
844
Peter Zijlstraaff3d912010-03-02 20:32:08 +0100845static inline void x86_pmu_disable_event(struct perf_event *event)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100846{
Peter Zijlstraaff3d912010-03-02 20:32:08 +0100847 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstra7645a242010-03-08 13:51:31 +0100848
849 wrmsrl(hwc->config_base + hwc->idx, hwc->config);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +0100850}
851
Tejun Heo245b2e72009-06-24 15:13:48 +0900852static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +0100853
Ingo Molnaree060942008-12-13 09:00:03 +0100854/*
855 * Set the next IRQ period, based on the hwc->period_left value.
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200856 * To be called with the event disabled in hw:
Ingo Molnaree060942008-12-13 09:00:03 +0100857 */
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +0200858static int
Peter Zijlstra07088ed2010-03-02 20:16:01 +0100859x86_perf_event_set_period(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +0100860{
Peter Zijlstra07088ed2010-03-02 20:16:01 +0100861 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +0100862 s64 left = atomic64_read(&hwc->period_left);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +0200863 s64 period = hwc->sample_period;
Peter Zijlstra7645a242010-03-08 13:51:31 +0100864 int ret = 0, idx = hwc->idx;
Ingo Molnar241771e2008-12-03 10:39:53 +0100865
Markus Metzger30dd5682009-07-21 15:56:48 +0200866 if (idx == X86_PMC_IDX_FIXED_BTS)
867 return 0;
868
Ingo Molnaree060942008-12-13 09:00:03 +0100869 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -0200870 * If we are way outside a reasonable range then just skip forward:
Ingo Molnaree060942008-12-13 09:00:03 +0100871 */
872 if (unlikely(left <= -period)) {
873 left = period;
874 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +0200875 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +0200876 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +0100877 }
878
879 if (unlikely(left <= 0)) {
880 left += period;
881 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +0200882 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +0200883 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +0100884 }
Ingo Molnar1c80f4b2009-05-15 08:25:22 +0200885 /*
Ingo Molnardfc65092009-09-21 11:31:35 +0200886 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
Ingo Molnar1c80f4b2009-05-15 08:25:22 +0200887 */
888 if (unlikely(left < 2))
889 left = 2;
Ingo Molnaree060942008-12-13 09:00:03 +0100890
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +0200891 if (left > x86_pmu.max_period)
892 left = x86_pmu.max_period;
893
Tejun Heo245b2e72009-06-24 15:13:48 +0900894 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
Ingo Molnaree060942008-12-13 09:00:03 +0100895
896 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200897 * The hw event starts counting from this event offset,
Ingo Molnaree060942008-12-13 09:00:03 +0100898 * mark it to be able to extra future deltas:
899 */
Ingo Molnar2f18d1e2008-12-22 11:10:42 +0100900 atomic64_set(&hwc->prev_count, (u64)-left);
Ingo Molnaree060942008-12-13 09:00:03 +0100901
Peter Zijlstra7645a242010-03-08 13:51:31 +0100902 wrmsrl(hwc->event_base + idx,
903 (u64)(-left) & x86_pmu.event_mask);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +0200904
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200905 perf_event_update_userpage(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +0200906
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +0200907 return ret;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +0100908}
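/*
 * Illustration of the (u64)-left programming above: with a sample period
 * of 100000 the counter is written with -100000 (masked to
 * x86_pmu.event_mask), so it overflows after exactly 100000 increments
 * and, with the overflow interrupt enabled, raises the PMI/NMI.
 * prev_count is set to the same value so that the next
 * x86_perf_event_update() computes the correct delta.
 */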
909
Peter Zijlstraaff3d912010-03-02 20:32:08 +0100910static void x86_pmu_enable_event(struct perf_event *event)
Robert Richter7c90cc42009-04-29 12:47:18 +0200911{
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200912 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richter7c90cc42009-04-29 12:47:18 +0200913 if (cpuc->enabled)
Peter Zijlstraaff3d912010-03-02 20:32:08 +0100914 __x86_pmu_enable_event(&event->hw);
Ingo Molnar241771e2008-12-03 10:39:53 +0100915}
916
Ingo Molnaree060942008-12-13 09:00:03 +0100917/*
Stephane Eranian1da53e02010-01-18 10:58:01 +0200918 * activate a single event
919 *
920 * The event is added to the group of enabled events
921 * but only if it can be scehduled with existing events.
922 *
923 * Called with PMU disabled. If successful and return value 1,
924 * then guaranteed to call perf_enable() and hw_perf_enable()
Peter Zijlstrafe9081c2009-10-08 11:56:07 +0200925 */
926static int x86_pmu_enable(struct perf_event *event)
927{
928 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +0200929 struct hw_perf_event *hwc;
930 int assign[X86_PMC_IDX_MAX];
931 int n, n0, ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +0200932
Stephane Eranian1da53e02010-01-18 10:58:01 +0200933 hwc = &event->hw;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +0200934
Stephane Eranian1da53e02010-01-18 10:58:01 +0200935 n0 = cpuc->n_events;
936 n = collect_events(cpuc, event, false);
937 if (n < 0)
938 return n;
Ingo Molnar53b441a2009-05-25 21:41:28 +0200939
Cyrill Gorcunova0727382010-03-11 19:54:39 +0300940 ret = x86_pmu.schedule_events(cpuc, n, assign);
Stephane Eranian1da53e02010-01-18 10:58:01 +0200941 if (ret)
942 return ret;
943 /*
944 * copy new assignment, now we know it is possible
945 * will be used by hw_perf_enable()
946 */
947 memcpy(cpuc->assign, assign, n*sizeof(int));
Ingo Molnar241771e2008-12-03 10:39:53 +0100948
Stephane Eranian1da53e02010-01-18 10:58:01 +0200949 cpuc->n_events = n;
Peter Zijlstra356e1f22010-03-06 13:49:56 +0100950 cpuc->n_added += n - n0;
Ingo Molnar7e2ae342008-12-09 11:40:46 +0100951
Ingo Molnar95cdd2e2008-12-21 13:50:42 +0100952 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +0100953}
954
Stephane Eraniand76a0812010-02-08 17:06:01 +0200955static int x86_pmu_start(struct perf_event *event)
956{
Peter Zijlstrac08053e2010-03-06 13:19:24 +0100957 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
958 int idx = event->hw.idx;
959
960 if (idx == -1)
Stephane Eraniand76a0812010-02-08 17:06:01 +0200961 return -EAGAIN;
962
Peter Zijlstra07088ed2010-03-02 20:16:01 +0100963 x86_perf_event_set_period(event);
Peter Zijlstrac08053e2010-03-06 13:19:24 +0100964 cpuc->events[idx] = event;
965 __set_bit(idx, cpuc->active_mask);
Peter Zijlstraaff3d912010-03-02 20:32:08 +0100966 x86_pmu.enable(event);
Peter Zijlstrac08053e2010-03-06 13:19:24 +0100967 perf_event_update_userpage(event);
Stephane Eraniand76a0812010-02-08 17:06:01 +0200968
969 return 0;
970}
971
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200972static void x86_pmu_unthrottle(struct perf_event *event)
Peter Zijlstraa78ac322009-05-25 17:39:05 +0200973{
Peter Zijlstra71e2d282010-03-08 17:51:33 +0100974 int ret = x86_pmu_start(event);
975 WARN_ON_ONCE(ret);
Peter Zijlstraa78ac322009-05-25 17:39:05 +0200976}
977
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200978void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +0100979{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +0100980 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Peter Zijlstraca037702010-03-02 19:52:12 +0100981 u64 pebs;
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200982 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +0200983 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +0100984 int cpu, idx;
985
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200986 if (!x86_pmu.num_events)
Ingo Molnar1e125672008-12-09 12:18:18 +0100987 return;
Ingo Molnar241771e2008-12-03 10:39:53 +0100988
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +0200989 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +0100990
991 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +0200992 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +0100993
Robert Richterfaa28ae2009-04-29 12:47:13 +0200994 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +0530995 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
996 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
997 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
998 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Peter Zijlstraca037702010-03-02 19:52:12 +0100999 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001000
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301001 pr_info("\n");
1002 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1003 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1004 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1005 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Peter Zijlstraca037702010-03-02 19:52:12 +01001006 pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301007 }
Peter Zijlstra7645a242010-03-08 13:51:31 +01001008 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001009
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001010 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter4a06bd82009-04-29 12:47:11 +02001011 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1012 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001013
Tejun Heo245b2e72009-06-24 15:13:48 +09001014 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001015
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301016 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001017 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301018 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001019 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301020 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001021 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001022 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001023 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001024 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1025
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301026 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001027 cpu, idx, pmc_count);
1028 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001029 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001030}
1031
Stephane Eraniand76a0812010-02-08 17:06:01 +02001032static void x86_pmu_stop(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001033{
Stephane Eraniand76a0812010-02-08 17:06:01 +02001034 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001035 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstra2e841872010-01-25 15:58:43 +01001036 int idx = hwc->idx;
Ingo Molnar241771e2008-12-03 10:39:53 +01001037
Peter Zijlstra71e2d282010-03-08 17:51:33 +01001038 if (!__test_and_clear_bit(idx, cpuc->active_mask))
1039 return;
1040
Peter Zijlstraaff3d912010-03-02 20:32:08 +01001041 x86_pmu.disable(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001042
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001043 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001044 * Drain the remaining delta count out of a event
Ingo Molnaree060942008-12-13 09:00:03 +01001045 * that we are disabling:
1046 */
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001047 x86_perf_event_update(event);
Markus Metzger30dd5682009-07-21 15:56:48 +02001048
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001049 cpuc->events[idx] = NULL;
Peter Zijlstra2e841872010-01-25 15:58:43 +01001050}
1051
1052static void x86_pmu_disable(struct perf_event *event)
1053{
1054 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1055 int i;
1056
Stephane Eraniand76a0812010-02-08 17:06:01 +02001057 x86_pmu_stop(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001058
Stephane Eranian1da53e02010-01-18 10:58:01 +02001059 for (i = 0; i < cpuc->n_events; i++) {
1060 if (event == cpuc->event_list[i]) {
1061
1062 if (x86_pmu.put_event_constraints)
1063 x86_pmu.put_event_constraints(cpuc, event);
1064
1065 while (++i < cpuc->n_events)
1066 cpuc->event_list[i-1] = cpuc->event_list[i];
1067
1068 --cpuc->n_events;
Peter Zijlstra6c9687a2010-01-25 11:57:25 +01001069 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001070 }
1071 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001072 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001073}
1074
Peter Zijlstra8c48e442010-01-29 13:25:31 +01001075static int x86_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02001076{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001077 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001078 struct cpu_hw_events *cpuc;
1079 struct perf_event *event;
1080 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04001081 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001082 u64 val;
1083
Peter Zijlstradc1d6282010-03-03 15:55:04 +01001084 perf_sample_data_init(&data, 0);
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001085
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001086 cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001087
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001088 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter43f62012009-04-29 16:55:56 +02001089 if (!test_bit(idx, cpuc->active_mask))
Robert Richtera29aa8a2009-04-29 12:47:21 +02001090 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001091
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001092 event = cpuc->events[idx];
1093 hwc = &event->hw;
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001094
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001095 val = x86_perf_event_update(event);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001096 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001097 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001098
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001099 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001100 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001101 */
1102 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001103 data.period = event->hw.last_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001104
Peter Zijlstra07088ed2010-03-02 20:16:01 +01001105 if (!x86_perf_event_set_period(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001106 continue;
1107
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001108 if (perf_event_overflow(event, 1, &data, regs))
Peter Zijlstra71e2d282010-03-08 17:51:33 +01001109 x86_pmu_stop(event);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001110 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001111
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001112 if (handled)
1113 inc_irq_stat(apic_perf_irqs);
1114
Robert Richtera29aa8a2009-04-29 12:47:21 +02001115 return handled;
1116}
Robert Richter39d81ea2009-04-29 12:47:05 +02001117
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001118void smp_perf_pending_interrupt(struct pt_regs *regs)
1119{
1120 irq_enter();
1121 ack_APIC_irq();
1122 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001123 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001124 irq_exit();
1125}
1126
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001127void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001128{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001129#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02001130 if (!x86_pmu.apic || !x86_pmu_initialized())
1131 return;
1132
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001133 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001134#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001135}
1136
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001137void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001138{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001139#ifdef CONFIG_X86_LOCAL_APIC
1140 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001141 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001142
Ingo Molnar241771e2008-12-03 10:39:53 +01001143 /*
Yong Wangc323d952009-05-29 13:28:35 +08001144 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001145 */
Yong Wangc323d952009-05-29 13:28:35 +08001146 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001147#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01001148}
1149
1150static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001151perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01001152 unsigned long cmd, void *__args)
1153{
1154 struct die_args *args = __args;
1155 struct pt_regs *regs;
1156
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001157 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001158 return NOTIFY_DONE;
1159
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001160 switch (cmd) {
1161 case DIE_NMI:
1162 case DIE_NMI_IPI:
1163 break;
1164
1165 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001166 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001167 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001168
1169 regs = args->regs;
1170
Ingo Molnar04da8a42009-08-11 10:40:08 +02001171#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01001172 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001173#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001174 /*
1175 * Can't rely on the handled return value to say it was our NMI, two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001176 * events could trigger 'simultaneously' raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001177 *
1178 * If the first NMI handles both, the latter will be empty and daze
1179 * the CPU.
1180 */
Yong Wanga3288102009-06-03 13:12:55 +08001181 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001182
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001183 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001184}
1185
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001186static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1187 .notifier_call = perf_event_nmi_handler,
1188 .next = NULL,
1189 .priority = 1
1190};
1191
Peter Zijlstra63b14642010-01-22 16:32:17 +01001192static struct event_constraint unconstrained;
Stephane Eranian38331f62010-02-08 17:17:01 +02001193static struct event_constraint emptyconstraint;
Peter Zijlstra63b14642010-01-22 16:32:17 +01001194
Peter Zijlstra63b14642010-01-22 16:32:17 +01001195static struct event_constraint *
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001196x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001197{
Peter Zijlstra63b14642010-01-22 16:32:17 +01001198 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001199
Stephane Eranian1da53e02010-01-18 10:58:01 +02001200 if (x86_pmu.event_constraints) {
1201 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01001202 if ((event->hw.config & c->cmask) == c->code)
1203 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001204 }
1205 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01001206
1207 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001208}
1209
Stephane Eranian1da53e02010-01-18 10:58:01 +02001210static int x86_event_sched_in(struct perf_event *event,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001211 struct perf_cpu_context *cpuctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001212{
1213 int ret = 0;
1214
1215 event->state = PERF_EVENT_STATE_ACTIVE;
Peter Zijlstra6e377382010-02-11 13:21:58 +01001216 event->oncpu = smp_processor_id();
Stephane Eranian1da53e02010-01-18 10:58:01 +02001217 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
1218
1219 if (!is_x86_event(event))
1220 ret = event->pmu->enable(event);
1221
1222 if (!ret && !is_software_event(event))
1223 cpuctx->active_oncpu++;
1224
1225 if (!ret && event->attr.exclusive)
1226 cpuctx->exclusive = 1;
1227
1228 return ret;
1229}
1230
1231static void x86_event_sched_out(struct perf_event *event,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001232 struct perf_cpu_context *cpuctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001233{
1234 event->state = PERF_EVENT_STATE_INACTIVE;
1235 event->oncpu = -1;
1236
1237 if (!is_x86_event(event))
1238 event->pmu->disable(event);
1239
1240 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
1241
1242 if (!is_software_event(event))
1243 cpuctx->active_oncpu--;
1244
1245 if (event->attr.exclusive || !cpuctx->active_oncpu)
1246 cpuctx->exclusive = 0;
1247}
1248
1249/*
1250 * Called to enable a whole group of events.
1251 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
1252 * Assumes the caller has disabled interrupts and has
1253 * frozen the PMU with hw_perf_save_disable.
1254 *
1255 * Called with the PMU disabled. If this succeeds (return value 1), the
1256 * caller is then guaranteed to call perf_enable() and hw_perf_enable().
1257 */
1258int hw_perf_group_sched_in(struct perf_event *leader,
1259 struct perf_cpu_context *cpuctx,
Peter Zijlstra6e377382010-02-11 13:21:58 +01001260 struct perf_event_context *ctx)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001261{
Peter Zijlstra6e377382010-02-11 13:21:58 +01001262 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001263 struct perf_event *sub;
1264 int assign[X86_PMC_IDX_MAX];
1265 int n0, n1, ret;
1266
Cyrill Gorcunov0b861222010-03-12 00:50:16 +03001267 if (!x86_pmu_initialized())
1268 return 0;
1269
Stephane Eranian1da53e02010-01-18 10:58:01 +02001270 /* n0 = total number of events */
1271 n0 = collect_events(cpuc, leader, true);
1272 if (n0 < 0)
1273 return n0;
1274
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001275 ret = x86_pmu.schedule_events(cpuc, n0, assign);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001276 if (ret)
1277 return ret;
1278
Peter Zijlstra6e377382010-02-11 13:21:58 +01001279 ret = x86_event_sched_in(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001280 if (ret)
1281 return ret;
1282
1283 n1 = 1;
1284 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02001285 if (sub->state > PERF_EVENT_STATE_OFF) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001286 ret = x86_event_sched_in(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001287 if (ret)
1288 goto undo;
1289 ++n1;
1290 }
1291 }
1292 /*
1293 * Copy the new assignment now that we know it is possible;
1294 * it will be used by hw_perf_enable().
1295 */
1296 memcpy(cpuc->assign, assign, n0*sizeof(int));
1297
1298 cpuc->n_events = n0;
Peter Zijlstra356e1f22010-03-06 13:49:56 +01001299 cpuc->n_added += n1;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001300 ctx->nr_active += n1;
1301
1302 /*
1303 * Return 1 to signal success and that the events are active.
1304 * This is not quite true because we defer the actual
1305 * activation until hw_perf_enable(), but this way we
1306 * ensure the caller won't try to enable individual
1307 * events.
1308 */
1309 return 1;
1310undo:
Peter Zijlstra6e377382010-02-11 13:21:58 +01001311 x86_event_sched_out(leader, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001312 n0 = 1;
1313 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
1314 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
Peter Zijlstra6e377382010-02-11 13:21:58 +01001315 x86_event_sched_out(sub, cpuctx);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001316 if (++n0 == n1)
1317 break;
1318 }
1319 }
1320 return ret;
1321}
1322
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001323#include "perf_event_amd.c"
1324#include "perf_event_p6.c"
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001325#include "perf_event_p4.c"
Peter Zijlstracaff2be2010-03-03 12:02:30 +01001326#include "perf_event_intel_lbr.c"
Peter Zijlstraca037702010-03-02 19:52:12 +01001327#include "perf_event_intel_ds.c"
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001328#include "perf_event_intel.c"
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301329
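/*
 * CPU hotplug callback: forward each hotplug phase to the optional
 * model-specific hooks so per-CPU PMU state can be set up and torn down.
 */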
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001330static int __cpuinit
1331x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1332{
1333 unsigned int cpu = (long)hcpu;
1334
1335 switch (action & ~CPU_TASKS_FROZEN) {
1336 case CPU_UP_PREPARE:
1337 if (x86_pmu.cpu_prepare)
1338 x86_pmu.cpu_prepare(cpu);
1339 break;
1340
1341 case CPU_STARTING:
1342 if (x86_pmu.cpu_starting)
1343 x86_pmu.cpu_starting(cpu);
1344 break;
1345
1346 case CPU_DYING:
1347 if (x86_pmu.cpu_dying)
1348 x86_pmu.cpu_dying(cpu);
1349 break;
1350
1351 case CPU_DEAD:
1352 if (x86_pmu.cpu_dead)
1353 x86_pmu.cpu_dead(cpu);
1354 break;
1355
1356 default:
1357 break;
1358 }
1359
1360 return NOTIFY_OK;
1361}
1362
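/*
 * Without a local APIC there is no PMU interrupt, so disable the
 * interrupt-driven (sampling) part of the driver.
 */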
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001363static void __init pmu_check_apic(void)
1364{
1365 if (cpu_has_apic)
1366 return;
1367
1368 x86_pmu.apic = 0;
1369 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1370 pr_info("no hardware sampling interrupt available.\n");
1371}
1372
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001373void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301374{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001375 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001376 int err;
1377
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001378 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001379
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301380 switch (boot_cpu_data.x86_vendor) {
1381 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001382 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301383 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301384 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001385 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301386 break;
Robert Richter41389602009-04-29 12:47:00 +02001387 default:
1388 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301389 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001390 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001391 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301392 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001393 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301394
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001395 pmu_check_apic();
1396
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001397 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001398
Peter Zijlstra3c447802010-03-04 21:49:01 +01001399 if (x86_pmu.quirks)
1400 x86_pmu.quirks();
1401
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001402 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
1403 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
1404 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
1405 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001406 }
Robert Richterd6dc0b42010-03-17 12:49:13 +01001407 x86_pmu.intel_ctrl = (1 << x86_pmu.num_events) - 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001408 perf_max_events = x86_pmu.num_events;
Ingo Molnar241771e2008-12-03 10:39:53 +01001409
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001410 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
1411 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
1412 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
1413 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001414 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001415
Robert Richterd6dc0b42010-03-17 12:49:13 +01001416 x86_pmu.intel_ctrl |=
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001417 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001418
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001419 perf_events_lapic_init();
1420 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001421
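	/*
	 * The default constraint permits any of the generic counters,
	 * i.e. bits 0 .. num_events-1.
	 */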
Peter Zijlstra63b14642010-01-22 16:32:17 +01001422 unconstrained = (struct event_constraint)
Peter Zijlstrafce877e2010-01-29 13:25:12 +01001423 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
1424 0, x86_pmu.num_events);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001425
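	/*
	 * Events constrained to the fixed-purpose counters can also run
	 * on a generic counter: widen their index masks and weights
	 * accordingly.
	 */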
Peter Zijlstrab622d642010-02-01 15:36:30 +01001426 if (x86_pmu.event_constraints) {
1427 for_each_event_constraint(c, x86_pmu.event_constraints) {
1428 if (c->cmask != INTEL_ARCH_FIXED_MASK)
1429 continue;
1430
1431 c->idxmsk64 |= (1ULL << x86_pmu.num_events) - 1;
1432 c->weight += x86_pmu.num_events;
1433 }
1434 }
1435
Ingo Molnar57c0c152009-09-21 12:20:38 +02001436 pr_info("... version: %d\n", x86_pmu.version);
1437 pr_info("... bit width: %d\n", x86_pmu.event_bits);
1438 pr_info("... generic registers: %d\n", x86_pmu.num_events);
1439 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
1440 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1441 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001442 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001443
1444 perf_cpu_notifier(x86_pmu_notifier);
Ingo Molnar241771e2008-12-03 10:39:53 +01001445}
Ingo Molnar621a01e2008-12-11 12:46:46 +01001446
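/* ->read(): fold the current hardware count into the event's count. */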
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001447static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001448{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001449 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001450}
1451
Robert Richter4aeb0b42009-04-29 12:47:03 +02001452static const struct pmu pmu = {
1453 .enable = x86_pmu_enable,
1454 .disable = x86_pmu_disable,
Stephane Eraniand76a0812010-02-08 17:06:01 +02001455 .start = x86_pmu_start,
1456 .stop = x86_pmu_stop,
Robert Richter4aeb0b42009-04-29 12:47:03 +02001457 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001458 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01001459};
1460
Stephane Eranian1da53e02010-01-18 10:58:01 +02001461/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001462 * validate that we can schedule this event
1463 */
1464static int validate_event(struct perf_event *event)
1465{
1466 struct cpu_hw_events *fake_cpuc;
1467 struct event_constraint *c;
1468 int ret = 0;
1469
1470 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1471 if (!fake_cpuc)
1472 return -ENOMEM;
1473
1474 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1475
1476 if (!c || !c->weight)
1477 ret = -ENOSPC;
1478
1479 if (x86_pmu.put_event_constraints)
1480 x86_pmu.put_event_constraints(fake_cpuc, event);
1481
1482 kfree(fake_cpuc);
1483
1484 return ret;
1485}
1486
1487/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001488 * validate a single event group
1489 *
1490 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001491 * - check events are compatible with each other
1492 * - events do not compete for the same counter
1493 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001494 *
1495 * validation ensures the group can be loaded onto the
1496 * PMU if it was the only group available.
1497 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001498static int validate_group(struct perf_event *event)
1499{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001500 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001501 struct cpu_hw_events *fake_cpuc;
1502 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001503
Peter Zijlstra502568d2010-01-22 14:35:46 +01001504 ret = -ENOMEM;
1505 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1506 if (!fake_cpuc)
1507 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001508
Stephane Eranian1da53e02010-01-18 10:58:01 +02001509 /*
1510 * The event is not yet connected with its
1511 * siblings, therefore we must first collect the
1512 * existing siblings, then add the new event
1513 * before we can simulate the scheduling.
1514 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001515 ret = -ENOSPC;
1516 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001517 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001518 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001519
Peter Zijlstra502568d2010-01-22 14:35:46 +01001520 fake_cpuc->n_events = n;
1521 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001522 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001523 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001524
Peter Zijlstra502568d2010-01-22 14:35:46 +01001525 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001526
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001527 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001528
1529out_free:
1530 kfree(fake_cpuc);
1531out:
1532 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001533}
1534
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001535const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001536{
Stephane Eranian81130702010-01-21 17:39:01 +02001537 const struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001538 int err;
1539
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001540 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001541 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001542 /*
1543 * we temporarily connect event to its pmu
1544 * such that validate_group() can classify
1545 * it as an x86 event using is_x86_event()
1546 */
1547 tmp = event->pmu;
1548 event->pmu = &pmu;
1549
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001550 if (event->group_leader != event)
1551 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001552 else
1553 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001554
1555 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001556 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001557 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001558 if (event->destroy)
1559 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02001560 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001561 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001562
Robert Richter4aeb0b42009-04-29 12:47:03 +02001563 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001564}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001565
1566/*
1567 * callchain support
1568 */
1569
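/* Append one address to the callchain, capped at PERF_MAX_STACK_DEPTH. */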
1570static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001571void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001572{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001573 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001574 entry->ip[entry->nr++] = ip;
1575}
1576
Tejun Heo245b2e72009-06-24 15:13:48 +09001577static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
1578static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001579
1580
1581static void
1582backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1583{
1584 /* Ignore warnings */
1585}
1586
1587static void backtrace_warning(void *data, char *msg)
1588{
1589 /* Ignore warnings */
1590}
1591
1592static int backtrace_stack(void *data, char *name)
1593{
Ingo Molnar038e8362009-06-15 09:57:59 +02001594 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001595}
1596
1597static void backtrace_address(void *data, unsigned long addr, int reliable)
1598{
1599 struct perf_callchain_entry *entry = data;
1600
1601 if (reliable)
1602 callchain_store(entry, addr);
1603}
1604
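/*
 * stacktrace_ops callbacks for dump_trace(): warnings are ignored and
 * only addresses flagged as reliable are recorded in the callchain.
 */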
1605static const struct stacktrace_ops backtrace_ops = {
1606 .warning = backtrace_warning,
1607 .warning_symbol = backtrace_warning_symbol,
1608 .stack = backtrace_stack,
1609 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001610 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001611};
1612
Ingo Molnar038e8362009-06-15 09:57:59 +02001613#include "../dumpstack.h"
1614
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001615static void
1616perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1617{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001618 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02001619 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001620
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01001621 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001622}
1623
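/*
 * Copy one saved stack frame from user space; returns true only if the
 * whole frame could be read.
 */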
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001624static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
1625{
1626 unsigned long bytes;
1627
1628 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
1629
1630 return bytes == sizeof(*frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001631}
1632
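/*
 * Walk the user-space frame-pointer chain starting at regs->bp, copying
 * each frame with copy_from_user_nmi(). Stop when a copy fails or the
 * frame pointer drops below the current stack pointer.
 */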
1633static void
1634perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1635{
1636 struct stack_frame frame;
1637 const void __user *fp;
1638
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001639 if (!user_mode(regs))
1640 regs = task_pt_regs(current);
1641
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001642 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001643
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001644 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001645 callchain_store(entry, regs->ip);
1646
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001647 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Ingo Molnar038e8362009-06-15 09:57:59 +02001648 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001649 frame.return_address = 0;
1650
1651 if (!copy_stack_frame(fp, &frame))
1652 break;
1653
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001654 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001655 break;
1656
1657 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001658 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001659 }
1660}
1661
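/*
 * Record the kernel and/or user callchain, depending on where the
 * sample was taken and whether the task has a user-space mm.
 */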
1662static void
1663perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1664{
1665 int is_user;
1666
1667 if (!regs)
1668 return;
1669
1670 is_user = user_mode(regs);
1671
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001672 if (is_user && current->state != TASK_RUNNING)
1673 return;
1674
1675 if (!is_user)
1676 perf_callchain_kernel(regs, entry);
1677
1678 if (current->mm)
1679 perf_callchain_user(regs, entry);
1680}
1681
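/*
 * Pick the per-CPU entry buffer matching the current context (NMI vs.
 * IRQ) so the two cannot corrupt each other, then fill it.
 */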
1682struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1683{
1684 struct perf_callchain_entry *entry;
1685
1686 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09001687 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001688 else
Tejun Heo245b2e72009-06-24 15:13:48 +09001689 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001690
1691 entry->nr = 0;
1692
1693 perf_do_callchain(regs, entry);
1694
1695 return entry;
1696}
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01001697
Frederic Weisbecker1d199b12010-03-16 01:05:02 +01001698#ifdef CONFIG_EVENT_TRACING
Frederic Weisbecker5331d7b2010-03-04 21:15:56 +01001699void perf_arch_fetch_caller_regs(struct pt_regs *regs, unsigned long ip, int skip)
1700{
1701 regs->ip = ip;
1702 /*
1703 * perf_arch_fetch_caller_regs() adds another call frame, so we need
1704 * to increment the skip level.
1705 */
1706 regs->bp = rewind_frame_pointer(skip + 1);
1707 regs->cs = __KERNEL_CS;
1708 local_save_flags(regs->flags);
1709}
Frederic Weisbecker1d199b12010-03-16 01:05:02 +01001710#endif