/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/compat.h>

#if 0
#undef wrmsrl
#define wrmsrl(msr, val)					\
do {								\
	trace_printk("wrmsrl(%lx, %lx)\n", (unsigned long)(msr),\
			(unsigned long)(val));			\
	native_write_msr((msr), (u32)((u64)(val)),		\
			(u32)((u64)(val) >> 32));		\
} while (0)
#endif

/*
 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context;
 * returns the number of bytes copied, which may be less than n if a page
 * could not be pinned
 */
static unsigned long
copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
{
	unsigned long offset, addr = (unsigned long)from;
	int type = in_nmi() ? KM_NMI : KM_IRQ0;
	unsigned long size, len = 0;
	struct page *page;
	void *map;
	int ret;

	do {
		ret = __get_user_pages_fast(addr, 1, 0, &page);
		if (!ret)
			break;

		offset = addr & (PAGE_SIZE - 1);
		size = min(PAGE_SIZE - offset, n - len);

		map = kmap_atomic(page, type);
		memcpy(to, map+offset, size);
		kunmap_atomic(map, type);
		put_page(page);

		len  += size;
		to   += size;
		addr += size;

	} while (len < n);

	return len;
}

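/*
 * Scheduling constraint for one event: idxmsk is the set of counters the
 * event may occupy, code/cmask select which events the constraint applies
 * to, and weight caches the popcount of idxmsk for the scheduler's
 * most-constrained-first pass.
 */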
struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64;
	};
	u64	code;
	u64	cmask;
	int	weight;
};

struct amd_nb {
	int nb_id;  /* NorthBridge id */
	int refcnt; /* reference count */
	struct perf_event *owners[X86_PMC_IDX_MAX];
	struct event_constraint event_constraints[X86_PMC_IDX_MAX];
};

#define MAX_LBR_ENTRIES		16

struct cpu_hw_events {
	/*
	 * Generic x86 PMC bits
	 */
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		running[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int			enabled;

	int			n_events;
	int			n_added;
	int			n_txn;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */

	unsigned int		group_flag;

	/*
	 * Intel DebugStore bits
	 */
	struct debug_store	*ds;
	u64			pebs_enabled;

	/*
	 * Intel LBR bits
	 */
	int				lbr_users;
	void				*lbr_context;
	struct perf_branch_stack	lbr_stack;
	struct perf_branch_entry	lbr_entries[MAX_LBR_ENTRIES];

	/*
	 * AMD specific bits
	 */
	struct amd_nb		*amd_nb;
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64 = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

/*
 * Constraint on the Event code.
 */
#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, ARCH_PERFMON_EVENTSEL_EVENT)

/*
 * Constraint on the Event code + UMask + fixed-mask
 *
 * filter mask to validate fixed counter events.
 * the following filters disqualify for fixed counters:
 *  - inv
 *  - edge
 *  - cnt-mask
 * The other filters are supported by fixed counters.
 * The any-thread option is supported starting with v3.
 */
#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, (1ULL << (32+n)), X86_RAW_EVENT_MASK)

/*
 * Constraint on the Event code + UMask
 */
#define PEBS_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVENT_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->weight; (e)++)

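/*
 * Intel PMU feature bits, laid out as in the IA32_PERF_CAPABILITIES MSR;
 * cached in x86_pmu.intel_cap below on arch-perfmon v2+ parts.
 */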
union perf_capabilities {
	struct {
		u64	lbr_format    : 6;
		u64	pebs_trap     : 1;
		u64	pebs_arch_reg : 1;
		u64	pebs_format   : 4;
		u64	smm_freeze    : 1;
	};
	u64	capabilities;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	/*
	 * Generic x86 PMC bits
	 */
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(int added);
	void		(*enable)(struct perf_event *);
	void		(*disable)(struct perf_event *);
	int		(*hw_config)(struct perf_event *event);
	int		(*schedule_events)(struct cpu_hw_events *cpuc, int n, int *assign);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		cntval_bits;
	u64		cntval_mask;
	int		apic;
	u64		max_period;
	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
	void		(*quirks)(void);
	int		perfctr_second_write;

	int		(*cpu_prepare)(int cpu);
	void		(*cpu_starting)(int cpu);
	void		(*cpu_dying)(int cpu);
	void		(*cpu_dead)(int cpu);

	/*
	 * Intel Arch Perfmon v2+
	 */
	u64			intel_ctrl;
	union perf_capabilities intel_cap;

	/*
	 * Intel DebugStore bits
	 */
	int		bts, pebs;
	int		pebs_record_size;
	void		(*drain_pebs)(struct pt_regs *regs);
	struct event_constraint *pebs_constraints;

	/*
	 * Intel LBR
	 */
	unsigned long	lbr_tos, lbr_from, lbr_to; /* MSR base regs       */
	int		lbr_nr;			   /* hardware stack size */
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event);

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the current raw count.
 */
static u64
x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count; the shift below sign-extends the
	 * cntval_bits-wide count to 64 bits.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

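/*
 * The counter and eventsel MSRs are shared with the lapic NMI watchdog:
 * reserve them all (temporarily disabling the watchdog) before touching
 * the PMU, and hand them back when the last event is destroyed.
 */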
#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static int reserve_ds_buffers(void);
static void release_ds_buffers(void);

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

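/*
 * Decode a PERF_TYPE_HW_CACHE config: byte 0 selects the cache, byte 1
 * the operation and byte 2 the result, indexing the model-specific
 * hw_cache_event_ids table.
 */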
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

static int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	if (attr->type == PERF_TYPE_RAW)
		return 0;

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}

static int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

		/* Support for constant skid */
		if (x86_pmu.pebs)
			precise++;

		/* Support for IP fixup */
		if (x86_pmu.lbr_nr)
			precise++;

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
	}

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	return x86_setup_perfctr(event);
}

/*
 * Set up the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else {
				err = reserve_ds_buffers();
				if (err)
					release_pmc_hardware();
			}
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	return x86_pmu.hw_config(event);
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

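/*
 * Find a counter assignment for the n events in cpuc->event_list.
 * Fast path: keep every event on the counter it used last time, as long
 * as its constraint still allows that and the counter is free.
 * Slow path: rescan from scratch, placing the most constrained events
 * (lowest constraint weight) first.
 */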
static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	int i, j, w, wmax, num = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	for (i = 0; i < n; i++) {
		c = x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
		constraints[i] = c;
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = constraints[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}
	if (i == n)
		goto done;

	/*
	 * begin slow path
	 */

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	/*
	 * weight = number of possible counters
	 *
	 * 1    = most constrained, only works on one counter
	 * wmax = least constrained, works on any counter
	 *
	 * assign events to counters starting with most
	 * constrained events.
	 */
	wmax = x86_pmu.num_counters;

	/*
	 * when fixed event counters are present,
	 * wmax is incremented by 1 to account
	 * for one more choice
	 */
	if (x86_pmu.num_counters_fixed)
		wmax++;

	for (w = 1, num = n; num && w <= wmax; w++) {
		/* for each event */
		for (i = 0; num && i < n; i++) {
			c = constraints[i];
			hwc = &cpuc->event_list[i]->hw;

			if (c->weight != w)
				continue;

			for_each_set_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
				if (!test_bit(j, used_mask))
					break;
			}

			if (j == X86_PMC_IDX_MAX)
				break;

			__set_bit(j, used_mask);

			if (assign)
				assign[i] = j;
			num--;
		}
	}
done:
	/*
	 * scheduling failed or is just a simulation,
	 * free resources if necessary
	 */
	if (!assign || num) {
		for (i = 0; i < n; i++) {
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
		}
	}
	return num ? -ENOSPC : 0;
}

/*
 * dogrp: true if we must collect sibling events (group)
 * returns the total number of events, or an error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -ENOSPC;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -ENOSPC;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= X86_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that event_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->event_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
	} else {
		hwc->config_base = x86_pmu.eventsel;
		hwc->event_base  = x86_pmu.perfctr;
	}
}

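/*
 * An event may keep its previous counter programming only if it is still
 * assigned the same counter, on the same CPU, and no other event has
 * recycled that counter in between (which the tag check catches).
 */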
static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);
static void x86_pmu_stop(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc,
					  u64 enable_mask)
{
	wrmsrl(hwc->config_base + hwc->idx, hwc->config | enable_mask);
}

static inline void x86_pmu_disable_event(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;

	wrmsrl(hwc->config_base + hwc->idx, hwc->config);
}

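/* last value programmed into each PMC, for perf_event_print_debug() */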
static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
static int
x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas; the counter
	 * counts upward and overflows after 'left' increments:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

	wrmsrl(hwc->event_base + idx, (u64)(-left) & x86_pmu.cntval_mask);

	/*
	 * Due to erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base + idx,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

static void x86_pmu_enable_event(struct perf_event *event)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	if (cpuc->enabled)
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	perf_pmu_disable(event->pmu);
	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If a group event scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy the new assignment now that we know it is possible;
	 * it will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	ret = 0;
out:
	perf_pmu_enable(event->pmu);
	return ret;
}

static void x86_pmu_start(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx = event->hw.idx;

	if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
		return;

	if (WARN_ON_ONCE(idx == -1))
		return;

	if (flags & PERF_EF_RELOAD) {
		WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
		x86_perf_event_set_period(event);
	}

	event->hw.state = 0;

	cpuc->events[idx] = event;
	__set_bit(idx, cpuc->active_mask);
	__set_bit(idx, cpuc->running);
	x86_pmu.enable(event);
	perf_event_update_userpage(event);
}

void perf_event_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	u64 pebs;
	struct cpu_hw_events *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_events, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
		rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
		pr_info("CPU#%d: pebs:       %016llx\n", cpu, pebs);
	}
	pr_info("CPU#%d: active:     %016llx\n", cpu, *(u64 *)cpuc->active_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(pmc_prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_stop(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	struct hw_perf_event *hwc = &event->hw;

	if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
		x86_pmu.disable(event);
		cpuc->events[hwc->idx] = NULL;
		WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
		hwc->state |= PERF_HES_STOPPED;
	}

	if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
		/*
		 * Drain the remaining delta count out of an event
		 * that we are disabling:
		 */
		x86_perf_event_update(event);
		hwc->state |= PERF_HES_UPTODATE;
	}
}

static void x86_pmu_del(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int i;

	/*
	 * If we're called during a txn, we don't need to do anything.
	 * The events never got scheduled and ->cancel_txn will truncate
	 * the event_list.
	 */
	if (cpuc->group_flag & PERF_EVENT_TXN)
		return;

	x86_pmu_stop(event, PERF_EF_UPDATE);

	for (i = 0; i < cpuc->n_events; i++) {
		if (event == cpuc->event_list[i]) {

			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, event);

			while (++i < cpuc->n_events)
				cpuc->event_list[i-1] = cpuc->event_list[i];

			--cpuc->n_events;
			break;
		}
	}
	perf_event_update_userpage(event);
}

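/*
 * Generic overflow handler, called from NMI context: fold each
 * overflowed counter back into its event, re-arm the sampling period
 * and emit a sample. Returns the number of overflows handled, which
 * the NMI notifier below uses to spot back-to-back NMIs.
 */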
static int x86_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_events *cpuc;
	struct perf_event *event;
	int idx, handled = 0;
	u64 val;

	perf_sample_data_init(&data, 0);

	cpuc = &__get_cpu_var(cpu_hw_events);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask)) {
			/*
			 * Though we deactivated the counter, some CPUs
			 * might still deliver spurious interrupts that
			 * were already in flight. Catch them:
			 */
			if (__test_and_clear_bit(idx, cpuc->running))
				handled++;
			continue;
		}

		event = cpuc->events[idx];

		val = x86_perf_event_update(event);
		if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
			continue;

		/*
		 * event overflow
		 */
		handled++;
		data.period = event->hw.last_period;

		if (!x86_perf_event_set_period(event))
			continue;

		if (perf_event_overflow(event, 1, &data, regs))
			x86_pmu_stop(event, 0);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}

void perf_events_lapic_init(void)
{
	if (!x86_pmu.apic || !x86_pmu_initialized())
		return;

	/*
	 * Always use NMI for PMU
	 */
	apic_write(APIC_LVTPC, APIC_DM_NMI);
}

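/*
 * Book-keeping for back-to-back PMU NMIs: 'marked' is the NMI count up
 * to which an 'unknown' NMI may safely be swallowed, 'handled' is how
 * many counters the previous PMU NMI serviced.
 */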
Robert Richter4177c422010-09-02 15:07:48 -04001210struct pmu_nmi_state {
1211 unsigned int marked;
1212 int handled;
1213};
1214
1215static DEFINE_PER_CPU(struct pmu_nmi_state, pmu_nmi);
1216
Ingo Molnar241771e2008-12-03 10:39:53 +01001217static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001218perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01001219 unsigned long cmd, void *__args)
1220{
1221 struct die_args *args = __args;
Robert Richter4177c422010-09-02 15:07:48 -04001222 unsigned int this_nmi;
1223 int handled;
Ingo Molnar241771e2008-12-03 10:39:53 +01001224
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001225 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001226 return NOTIFY_DONE;
1227
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001228 switch (cmd) {
1229 case DIE_NMI:
1230 case DIE_NMI_IPI:
1231 break;
Robert Richter4177c422010-09-02 15:07:48 -04001232 case DIE_NMIUNKNOWN:
1233 this_nmi = percpu_read(irq_stat.__nmi_count);
1234 if (this_nmi != __get_cpu_var(pmu_nmi).marked)
1235 /* let the kernel handle the unknown nmi */
1236 return NOTIFY_DONE;
1237 /*
1238 * This is a PMU back-to-back NMI: two events trigger
1239 * 'simultaneously', raising two back-to-back NMIs. If the
1240 * first NMI handles both, the second one finds nothing to
1241 * do and would daze the CPU, so drop it to avoid
1242 * false-positive 'unknown nmi' messages.
1243 */
1244 return NOTIFY_STOP;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001245 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001246 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001247 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001248
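/*
 * Some CPUs mask the LVT performance counter entry when the PMI
 * fires; unconditionally re-arm it before handling the interrupt.
 */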
Ingo Molnar241771e2008-12-03 10:39:53 +01001249 apic_write(APIC_LVTPC, APIC_DM_NMI);
Robert Richter4177c422010-09-02 15:07:48 -04001250
1251 handled = x86_pmu.handle_irq(args->regs);
1252 if (!handled)
1253 return NOTIFY_DONE;
1254
1255 this_nmi = percpu_read(irq_stat.__nmi_count);
1256 if ((handled > 1) ||
1257 /* the next nmi could be a back-to-back nmi */
1258 ((__get_cpu_var(pmu_nmi).marked == this_nmi) &&
1259 (__get_cpu_var(pmu_nmi).handled > 1))) {
1260 /*
1261 * We could see two back-to-back NMI sequences in a row:
1262 * the first NMI handles more than one counter, the 2nd
1263 * handles only one counter and the 3rd handles no
1264 * counter at all.
1265 *
1266 * This is the 2nd NMI because the previous one was
1267 * handling more than one counter. Mark the next (3rd)
1268 * NMI and drop it if it turns out to be unhandled.
1269 */
1270 __get_cpu_var(pmu_nmi).marked = this_nmi + 1;
1271 __get_cpu_var(pmu_nmi).handled = handled;
1272 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001273
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001274 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001275}
1276
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001277static __read_mostly struct notifier_block perf_event_nmi_notifier = {
1278 .notifier_call = perf_event_nmi_handler,
1279 .next = NULL,
1280 .priority = 1
1281};
1282
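/*
 * 'unconstrained' (filled in by init_hw_perf_events()) allows an
 * event on any available generic counter; 'emptyconstraint' has a
 * zero weight and therefore never schedules, which makes the event
 * fail validation.
 */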
Peter Zijlstra63b14642010-01-22 16:32:17 +01001283static struct event_constraint unconstrained;
Stephane Eranian38331f62010-02-08 17:17:01 +02001284static struct event_constraint emptyconstraint;
Peter Zijlstra63b14642010-01-22 16:32:17 +01001285
Peter Zijlstra63b14642010-01-22 16:32:17 +01001286static struct event_constraint *
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001287x86_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001288{
Peter Zijlstra63b14642010-01-22 16:32:17 +01001289 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001290
Stephane Eranian1da53e02010-01-18 10:58:01 +02001291 if (x86_pmu.event_constraints) {
1292 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01001293 if ((event->hw.config & c->cmask) == c->code)
1294 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001295 }
1296 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01001297
1298 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001299}
1300
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001301#include "perf_event_amd.c"
1302#include "perf_event_p6.c"
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001303#include "perf_event_p4.c"
Peter Zijlstracaff2be2010-03-03 12:02:30 +01001304#include "perf_event_intel_lbr.c"
Peter Zijlstraca037702010-03-02 19:52:12 +01001305#include "perf_event_intel_ds.c"
Peter Zijlstraf22f54f2010-02-26 12:05:05 +01001306#include "perf_event_intel.c"
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301307
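/*
 * Forward each CPU hotplug phase to the optional per-vendor x86_pmu
 * hooks; only CPU_UP_PREPARE can veto the operation by returning
 * something other than NOTIFY_OK.
 */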
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001308static int __cpuinit
1309x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1310{
1311 unsigned int cpu = (long)hcpu;
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001312 int ret = NOTIFY_OK;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001313
1314 switch (action & ~CPU_TASKS_FROZEN) {
1315 case CPU_UP_PREPARE:
1316 if (x86_pmu.cpu_prepare)
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001317 ret = x86_pmu.cpu_prepare(cpu);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001318 break;
1319
1320 case CPU_STARTING:
1321 if (x86_pmu.cpu_starting)
1322 x86_pmu.cpu_starting(cpu);
1323 break;
1324
1325 case CPU_DYING:
1326 if (x86_pmu.cpu_dying)
1327 x86_pmu.cpu_dying(cpu);
1328 break;
1329
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001330 case CPU_UP_CANCELED:
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001331 case CPU_DEAD:
1332 if (x86_pmu.cpu_dead)
1333 x86_pmu.cpu_dead(cpu);
1334 break;
1335
1336 default:
1337 break;
1338 }
1339
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001340 return ret;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001341}
1342
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001343static void __init pmu_check_apic(void)
1344{
1345 if (cpu_has_apic)
1346 return;
1347
1348 x86_pmu.apic = 0;
1349 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1350 pr_info("no hardware sampling interrupt available.\n");
1351}
1352
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001353void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301354{
Peter Zijlstrab622d642010-02-01 15:36:30 +01001355 struct event_constraint *c;
Robert Richter72eae042009-04-29 12:47:10 +02001356 int err;
1357
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001358 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001359
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301360 switch (boot_cpu_data.x86_vendor) {
1361 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001362 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301363 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301364 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001365 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301366 break;
Robert Richter41389602009-04-29 12:47:00 +02001367 default:
1368 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301369 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001370 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001371 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301372 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001373 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301374
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001375 pmu_check_apic();
1376
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001377 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001378
Peter Zijlstra3c447802010-03-04 21:49:01 +01001379 if (x86_pmu.quirks)
1380 x86_pmu.quirks();
1381
Robert Richter948b1bb2010-03-29 18:36:50 +02001382 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001383 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001384 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
1385 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001386 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001387 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
Ingo Molnar241771e2008-12-03 10:39:53 +01001388
Robert Richter948b1bb2010-03-29 18:36:50 +02001389 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001390 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
Robert Richter948b1bb2010-03-29 18:36:50 +02001391 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
1392 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001393 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001394
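/*
 * intel_ctrl becomes a bitmask of all usable counters: generic
 * counters in the low bits, fixed-purpose counters starting at
 * bit X86_PMC_IDX_FIXED.
 */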
Robert Richterd6dc0b42010-03-17 12:49:13 +01001395 x86_pmu.intel_ctrl |=
Robert Richter948b1bb2010-03-29 18:36:50 +02001396 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001397
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001398 perf_events_lapic_init();
1399 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001400
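/*
 * The default constraint: any event may go on any of the generic
 * counters.
 */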
Peter Zijlstra63b14642010-01-22 16:32:17 +01001401 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001402 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
1403 0, x86_pmu.num_counters);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001404
Peter Zijlstrab622d642010-02-01 15:36:30 +01001405 if (x86_pmu.event_constraints) {
1406 for_each_event_constraint(c, x86_pmu.event_constraints) {
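/*
 * Fixed-counter constraints (cmask == X86_RAW_EVENT_MASK) can also
 * run on any generic counter; extend their counter masks and
 * weights accordingly.
 */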
Robert Richtera098f442010-03-30 11:28:21 +02001407 if (c->cmask != X86_RAW_EVENT_MASK)
Peter Zijlstrab622d642010-02-01 15:36:30 +01001408 continue;
1409
Robert Richter948b1bb2010-03-29 18:36:50 +02001410 c->idxmsk64 |= (1ULL << x86_pmu.num_counters) - 1;
1411 c->weight += x86_pmu.num_counters;
Peter Zijlstrab622d642010-02-01 15:36:30 +01001412 }
1413 }
1414
Ingo Molnar57c0c152009-09-21 12:20:38 +02001415 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001416 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1417 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1418 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001419 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001420 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001421 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001422
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001423 perf_pmu_register(&pmu);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001424 perf_cpu_notifier(x86_pmu_notifier);
Ingo Molnar241771e2008-12-03 10:39:53 +01001425}
Ingo Molnar621a01e2008-12-11 12:46:46 +01001426
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001427static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001428{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001429 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001430}
1431
Lin Ming4d1c52b2010-04-23 13:56:12 +08001432/*
1433 * Start group events scheduling transaction
1434 * Set the flag to make pmu::enable() skip the
1435 * schedulability test; it will be performed at commit time
1436 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001437static void x86_pmu_start_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001438{
1439 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1440
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001441 perf_pmu_disable(pmu);
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001442 cpuc->group_flag |= PERF_EVENT_TXN;
Stephane Eranian90151c352010-05-25 16:23:10 +02001443 cpuc->n_txn = 0;
Lin Ming4d1c52b2010-04-23 13:56:12 +08001444}
1445
1446/*
1447 * Stop group events scheduling transaction
1448 * Clear the flag so that pmu::enable() will again perform
1449 * the schedulability test.
1450 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001451static void x86_pmu_cancel_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001452{
1453 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1454
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001455 cpuc->group_flag &= ~PERF_EVENT_TXN;
Stephane Eranian90151c352010-05-25 16:23:10 +02001456 /*
1457 * Truncate the collected events.
1458 */
1459 cpuc->n_added -= cpuc->n_txn;
1460 cpuc->n_events -= cpuc->n_txn;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001461 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001462}
1463
1464/*
1465 * Commit group events scheduling transaction
1466 * Perform the group schedulability test as a whole
1467 * Return 0 if success
1468 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001469static int x86_pmu_commit_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001470{
1471 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1472 int assign[X86_PMC_IDX_MAX];
1473 int n, ret;
1474
1475 n = cpuc->n_events;
1476
1477 if (!x86_pmu_initialized())
1478 return -EAGAIN;
1479
1480 ret = x86_pmu.schedule_events(cpuc, n, assign);
1481 if (ret)
1482 return ret;
1483
1484 /*
1485 * Copy the new assignment now that we know it is
1486 * possible; it will be used by hw_perf_enable().
1487 */
1488 memcpy(cpuc->assign, assign, n*sizeof(int));
1489
Peter Zijlstra8d2cacb2010-05-25 17:49:05 +02001490 cpuc->group_flag &= ~PERF_EVENT_TXN;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001491 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001492 return 0;
1493}
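/*
 * A hypothetical caller-side sketch of the transaction interface
 * above (for illustration only; the real caller lives in the core
 * perf code):
 *
 *	pmu->start_txn(pmu);
 *	// pmu->add() each event in the group ...
 *	if (pmu->commit_txn(pmu))
 *		pmu->cancel_txn(pmu);	/* could not schedule atomically */
 */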
1494
Stephane Eranian1da53e02010-01-18 10:58:01 +02001495/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001496 * validate that we can schedule this event
1497 */
1498static int validate_event(struct perf_event *event)
1499{
1500 struct cpu_hw_events *fake_cpuc;
1501 struct event_constraint *c;
1502 int ret = 0;
1503
1504 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1505 if (!fake_cpuc)
1506 return -ENOMEM;
1507
1508 c = x86_pmu.get_event_constraints(fake_cpuc, event);
1509
1510 if (!c || !c->weight)
1511 ret = -ENOSPC;
1512
1513 if (x86_pmu.put_event_constraints)
1514 x86_pmu.put_event_constraints(fake_cpuc, event);
1515
1516 kfree(fake_cpuc);
1517
1518 return ret;
1519}
1520
1521/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001522 * validate a single event group
1523 *
1524 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001525 * - check events are compatible with each other
1526 * - events do not compete for the same counter
1527 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001528 *
1529 * validation ensures the group can be loaded onto the
1530 * PMU if it was the only group available.
1531 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001532static int validate_group(struct perf_event *event)
1533{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001534 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001535 struct cpu_hw_events *fake_cpuc;
1536 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001537
Peter Zijlstra502568d2010-01-22 14:35:46 +01001538 ret = -ENOMEM;
1539 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
1540 if (!fake_cpuc)
1541 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001542
Stephane Eranian1da53e02010-01-18 10:58:01 +02001543 /*
1544 * the event is not yet connected with its
1545 * siblings, therefore we must first collect the
1546 * existing siblings, then add the new event
1547 * before we can simulate the scheduling
1548 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001549 ret = -ENOSPC;
1550 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001551 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001552 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001553
Peter Zijlstra502568d2010-01-22 14:35:46 +01001554 fake_cpuc->n_events = n;
1555 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001556 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01001557 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001558
Peter Zijlstra502568d2010-01-22 14:35:46 +01001559 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001560
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001561 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001562
1563out_free:
1564 kfree(fake_cpuc);
1565out:
1566 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001567}
1568
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001569int x86_pmu_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001570{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001571 struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001572 int err;
1573
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001574 switch (event->attr.type) {
1575 case PERF_TYPE_RAW:
1576 case PERF_TYPE_HARDWARE:
1577 case PERF_TYPE_HW_CACHE:
1578 break;
1579
1580 default:
1581 return -ENOENT;
1582 }
1583
1584 err = __x86_pmu_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001585 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001586 /*
1587 * we temporarily connect the event to its pmu
1588 * such that validate_group() can classify
1589 * it as an x86 event using is_x86_event()
1590 */
1591 tmp = event->pmu;
1592 event->pmu = &pmu;
1593
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001594 if (event->group_leader != event)
1595 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001596 else
1597 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02001598
1599 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001600 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001601 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001602 if (event->destroy)
1603 event->destroy(event);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02001604 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01001605
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001606 return err;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001607}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001608
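/*
 * The x86 implementation of the generic pmu interface; the core
 * perf code reaches this file through these callbacks, starting
 * with event_init.
 */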
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001609static struct pmu pmu = {
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001610 .pmu_enable = x86_pmu_enable,
1611 .pmu_disable = x86_pmu_disable,
1612
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001613 .event_init = x86_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001614
1615 .add = x86_pmu_add,
1616 .del = x86_pmu_del,
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001617 .start = x86_pmu_start,
1618 .stop = x86_pmu_stop,
1619 .read = x86_pmu_read,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001620
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001621 .start_txn = x86_pmu_start_txn,
1622 .cancel_txn = x86_pmu_cancel_txn,
1623 .commit_txn = x86_pmu_commit_txn,
1624};
1625
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001626/*
1627 * callchain support
1628 */
1629
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001630static void
1631backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1632{
1633 /* Ignore warnings */
1634}
1635
1636static void backtrace_warning(void *data, char *msg)
1637{
1638 /* Ignore warnings */
1639}
1640
1641static int backtrace_stack(void *data, char *name)
1642{
Ingo Molnar038e8362009-06-15 09:57:59 +02001643 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001644}
1645
1646static void backtrace_address(void *data, unsigned long addr, int reliable)
1647{
1648 struct perf_callchain_entry *entry = data;
1649
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001650 perf_callchain_store(entry, addr);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001651}
1652
1653static const struct stacktrace_ops backtrace_ops = {
1654 .warning = backtrace_warning,
1655 .warning_symbol = backtrace_warning_symbol,
1656 .stack = backtrace_stack,
1657 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01001658 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001659};
1660
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001661void
1662perf_callchain_kernel(struct perf_callchain_entry *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001663{
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001664 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1665 /* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02001666 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001667 }
1668
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001669 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001670
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01001671 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001672}
1673
Torok Edwin257ef9d2010-03-17 12:07:16 +02001674#ifdef CONFIG_COMPAT
1675static inline int
1676perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001677{
Torok Edwin257ef9d2010-03-17 12:07:16 +02001678 /* 32-bit process in 64-bit kernel. */
1679 struct stack_frame_ia32 frame;
1680 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001681
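/*
 * Each saved IA32 stack frame consists of a 32-bit frame pointer
 * followed by a 32-bit return address, hence the compat pointer
 * walk below.
 */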
Torok Edwin257ef9d2010-03-17 12:07:16 +02001682 if (!test_thread_flag(TIF_IA32))
1683 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001684
Torok Edwin257ef9d2010-03-17 12:07:16 +02001685 fp = compat_ptr(regs->bp);
1686 while (entry->nr < PERF_MAX_STACK_DEPTH) {
1687 unsigned long bytes;
1688 frame.next_frame = 0;
1689 frame.return_address = 0;
1690
1691 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1692 if (bytes != sizeof(frame))
1693 break;
1694
1695 if (fp < compat_ptr(regs->sp))
1696 break;
1697
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001698 perf_callchain_store(entry, frame.return_address);
Torok Edwin257ef9d2010-03-17 12:07:16 +02001699 fp = compat_ptr(frame.next_frame);
1700 }
1701 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001702}
Torok Edwin257ef9d2010-03-17 12:07:16 +02001703#else
1704static inline int
1705perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry *entry)
1706{
1707 return 0;
1708}
1709#endif
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001710
Frederic Weisbecker56962b42010-06-30 23:03:51 +02001711void
1712perf_callchain_user(struct perf_callchain_entry *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001713{
1714 struct stack_frame frame;
1715 const void __user *fp;
1716
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001717 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
1718 /* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02001719 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02001720 }
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001721
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001722 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001723
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001724 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001725
Torok Edwin257ef9d2010-03-17 12:07:16 +02001726 if (perf_callchain_user32(regs, entry))
1727 return;
1728
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001729 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02001730 unsigned long bytes;
Ingo Molnar038e8362009-06-15 09:57:59 +02001731 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001732 frame.return_address = 0;
1733
Torok Edwin257ef9d2010-03-17 12:07:16 +02001734 bytes = copy_from_user_nmi(&frame, fp, sizeof(frame));
1735 if (bytes != sizeof(frame))
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001736 break;
1737
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001738 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001739 break;
1740
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02001741 perf_callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001742 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001743 }
1744}
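/*
 * Note that both user-space walks above rely on the target binaries
 * being compiled with frame pointers; without them the chain ends
 * after the first entry.
 */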
1745
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001746unsigned long perf_instruction_pointer(struct pt_regs *regs)
1747{
1748 unsigned long ip;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001749
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001750 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
1751 ip = perf_guest_cbs->get_guest_ip();
1752 else
1753 ip = instruction_pointer(regs);
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001754
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001755 return ip;
1756}
1757
1758unsigned long perf_misc_flags(struct pt_regs *regs)
1759{
1760 int misc = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001761
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001762 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08001763 if (perf_guest_cbs->is_user_mode())
1764 misc |= PERF_RECORD_MISC_GUEST_USER;
1765 else
1766 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
1767 } else {
1768 if (user_mode(regs))
1769 misc |= PERF_RECORD_MISC_USER;
1770 else
1771 misc |= PERF_RECORD_MISC_KERNEL;
1772 }
1773
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001774 if (regs->flags & PERF_EFLAGS_EXACT)
Peter Zijlstraab608342010-04-08 23:03:20 +02001775 misc |= PERF_RECORD_MISC_EXACT_IP;
Zhang, Yanmin39447b32010-04-19 13:32:41 +08001776
1777 return misc;
1778}