/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>
#include <linux/cpu.h>
#include <linux/bitops.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_event_mask __read_mostly;

/* The maximal number of PEBS events: */
#define MAX_PEBS_EVENTS	4

/* The size of a BTS record in bytes: */
#define BTS_RECORD_SIZE		24

/* The size of a per-cpu BTS buffer in bytes: */
#define BTS_BUFFER_SIZE		(BTS_RECORD_SIZE * 2048)

/* The BTS overflow threshold in bytes from the end of the buffer: */
#define BTS_OVFL_TH		(BTS_RECORD_SIZE * 128)
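
/*
 * Illustration of the sizes above (just the arithmetic, not authoritative):
 * BTS_BUFFER_SIZE = 24 * 2048 = 49152 bytes, i.e. room for 2048 records,
 * and BTS_OVFL_TH = 24 * 128 = 3072 bytes, so the overflow interrupt is
 * requested once fewer than 128 record slots remain free in the buffer.
 */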


/*
 * Bits in the debugctlmsr controlling branch tracing.
 */
#define X86_DEBUGCTL_TR			(1 << 6)
#define X86_DEBUGCTL_BTS		(1 << 7)
#define X86_DEBUGCTL_BTINT		(1 << 8)
#define X86_DEBUGCTL_BTS_OFF_OS		(1 << 9)
#define X86_DEBUGCTL_BTS_OFF_USR	(1 << 10)

/*
 * A debug store configuration.
 *
 * We only support architectures that use 64bit fields.
 */
struct debug_store {
	u64	bts_buffer_base;
	u64	bts_index;
	u64	bts_absolute_maximum;
	u64	bts_interrupt_threshold;
	u64	pebs_buffer_base;
	u64	pebs_index;
	u64	pebs_absolute_maximum;
	u64	pebs_interrupt_threshold;
	u64	pebs_event_reset[MAX_PEBS_EVENTS];
};
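
/*
 * Illustrative note (based on the 64-bit DS save-area layout, not spelled
 * out in this file): each BTS_RECORD_SIZE (24 byte) slot between
 * bts_buffer_base and bts_absolute_maximum holds three u64 values -
 * branch-from address, branch-to address and flags - and bts_index points
 * at the next free slot.
 */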

struct event_constraint {
	union {
		unsigned long	idxmsk[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
		u64		idxmsk64[1];
	};
	int	code;
	int	cmask;
	int	weight;
};

struct cpu_hw_events {
	struct perf_event	*events[X86_PMC_IDX_MAX]; /* in counter order */
	unsigned long		active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	int			enabled;
	struct debug_store	*ds;

	int			n_events;
	int			n_added;
	int			assign[X86_PMC_IDX_MAX]; /* event to counter assignment */
	u64			tags[X86_PMC_IDX_MAX];
	struct perf_event	*event_list[X86_PMC_IDX_MAX]; /* in enabled order */
};

#define __EVENT_CONSTRAINT(c, n, m, w) {\
	{ .idxmsk64[0] = (n) },		\
	.code = (c),			\
	.cmask = (m),			\
	.weight = (w),			\
}

#define EVENT_CONSTRAINT(c, n, m)	\
	__EVENT_CONSTRAINT(c, n, m, HWEIGHT(n))

#define INTEL_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_EVTSEL_MASK)

#define FIXED_EVENT_CONSTRAINT(c, n)	\
	EVENT_CONSTRAINT(c, n, INTEL_ARCH_FIXED_MASK)

#define EVENT_CONSTRAINT_END		\
	EVENT_CONSTRAINT(0, 0, 0)

#define for_each_event_constraint(e, c)	\
	for ((e) = (c); (e)->cmask; (e)++)
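
/*
 * Example (illustrative only) of how the macros above expand:
 *
 *	INTEL_EVENT_CONSTRAINT(0x12, 0x2)
 *
 * describes event 0x12 (MUL in the tables below), restricted to generic
 * counter 1 (idxmsk64 = 0x2), matched on the event-select field only
 * (cmask = INTEL_ARCH_EVTSEL_MASK), with weight = HWEIGHT(0x2) = 1, i.e.
 * exactly one counter can host it.  A fixed-counter constraint such as
 * FIXED_EVENT_CONSTRAINT(0xc0, 0x3 | (1ULL << 32)) allows generic
 * counters 0-1 as well as fixed counter 0 (bit 32), so its weight is 3.
 */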

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *);
	void		(*disable_all)(void);
	void		(*enable_all)(void);
	void		(*enable)(struct hw_perf_event *, int);
	void		(*disable)(struct hw_perf_event *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_events;
	int		num_events_fixed;
	int		event_bits;
	u64		event_mask;
	int		apic;
	u64		max_period;
	u64		intel_ctrl;
	void		(*enable_bts)(u64 config);
	void		(*disable_bts)(void);

	struct event_constraint *
			(*get_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);

	void		(*put_event_constraints)(struct cpu_hw_events *cpuc,
						 struct perf_event *event);
	struct event_constraint *event_constraints;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

static int x86_perf_event_set_period(struct perf_event *event,
			     struct hw_perf_event *hwc, int idx);

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0079,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x012e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x0062,
};

static u64 p6_pmu_event_map(int hw_event)
{
	return p6_perfmon_event_map[hw_event];
}

/*
 * Event setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_EVENT			0x0000002EULL

static u64 p6_pmu_raw_event(u64 hw_event)
{
#define P6_EVNTSEL_EVENT_MASK		0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK		0x00040000ULL
#define P6_EVNTSEL_INV_MASK		0x00800000ULL
#define P6_EVNTSEL_REG_MASK		0xFF000000ULL

#define P6_EVNTSEL_MASK			\
	(P6_EVNTSEL_EVENT_MASK |	\
	 P6_EVNTSEL_UNIT_MASK  |	\
	 P6_EVNTSEL_EDGE_MASK  |	\
	 P6_EVNTSEL_INV_MASK   |	\
	 P6_EVNTSEL_REG_MASK)

	return hw_event & P6_EVNTSEL_MASK;
}

static struct event_constraint intel_p6_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1),	/* FLOPS */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1),	/* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x1),	/* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2),	/* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2),	/* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1),	/* CYCLES_DIV_BUSY */
	EVENT_CONSTRAINT_END
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES]		= 0x013c,
};

static struct event_constraint intel_core_event_constraints[] =
{
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xc1, 0x1), /* FP_COMP_INSTR_RET */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_core2_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x10, 0x1), /* FP_COMP_OPS_EXE */
	INTEL_EVENT_CONSTRAINT(0x11, 0x2), /* FP_ASSIST */
	INTEL_EVENT_CONSTRAINT(0x12, 0x2), /* MUL */
	INTEL_EVENT_CONSTRAINT(0x13, 0x2), /* DIV */
	INTEL_EVENT_CONSTRAINT(0x14, 0x1), /* CYCLES_DIV_BUSY */
	INTEL_EVENT_CONSTRAINT(0x18, 0x1), /* IDLE_DURING_DIV */
	INTEL_EVENT_CONSTRAINT(0x19, 0x2), /* DELAYED_BYPASS */
	INTEL_EVENT_CONSTRAINT(0xa1, 0x1), /* RS_UOPS_DISPATCH_CYCLES */
	INTEL_EVENT_CONSTRAINT(0xcb, 0x1), /* MEM_LOAD_RETIRED */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_nehalem_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x40, 0x3), /* L1D_CACHE_LD */
	INTEL_EVENT_CONSTRAINT(0x41, 0x3), /* L1D_CACHE_ST */
	INTEL_EVENT_CONSTRAINT(0x42, 0x3), /* L1D_CACHE_LOCK */
	INTEL_EVENT_CONSTRAINT(0x43, 0x3), /* L1D_ALL_REF */
	INTEL_EVENT_CONSTRAINT(0x48, 0x3), /* L1D_PEND_MISS */
	INTEL_EVENT_CONSTRAINT(0x4e, 0x3), /* L1D_PREFETCH */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_westmere_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0xf|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0xf|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	INTEL_EVENT_CONSTRAINT(0x51, 0x3), /* L1D */
	INTEL_EVENT_CONSTRAINT(0x60, 0x1), /* OFFCORE_REQUESTS_OUTSTANDING */
	INTEL_EVENT_CONSTRAINT(0x63, 0x3), /* CACHE_LOCK_CYCLES */
	EVENT_CONSTRAINT_END
};

static struct event_constraint intel_gen_event_constraints[] =
{
	FIXED_EVENT_CONSTRAINT(0xc0, (0x3|(1ULL<<32))), /* INSTRUCTIONS_RETIRED */
	FIXED_EVENT_CONSTRAINT(0x3c, (0x3|(1ULL<<33))), /* UNHALTED_CORE_CYCLES */
	EVENT_CONSTRAINT_END
};

static u64 intel_pmu_event_map(int hw_event)
{
	return intel_perfmon_event_map[hw_event];
}

/*
 * Generalized hw caching related hw_event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'hw_event makes no sense on
 * this CPU', any other value means the raw hw_event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
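
/*
 * Illustrative note: a PERF_TYPE_HW_CACHE config packs the three indices
 * above as
 *
 *	config = type | (op << 8) | (result << 16)
 *
 * so "L1D read misses" is 0x10000 (L1D = 0, OP_READ = 0, RESULT_MISS = 1),
 * which set_ext_hw_attr() below translates into the model specific code
 * from this table - e.g. 0x0140 (L1D_CACHE_LD.I_STATE) on Nehalem.
 */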

static __initconst u64 westmere_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0151, /* L1D.REPL */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x0251, /* L1D.M_REPL */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x010b, /* MEM_INST_RETIRED.LOADS */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x020b, /* MEM_INST_RETIRED.STORES */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0185, /* ITLB_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static __initconst u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static u64 intel_pmu_raw_event(u64 hw_event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK		0x00040000ULL
#define CORE_EVNTSEL_INV_MASK		0x00800000ULL
#define CORE_EVNTSEL_REG_MASK		0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(INTEL_ARCH_EVTSEL_MASK |	\
	 INTEL_ARCH_UNIT_MASK   |	\
	 INTEL_ARCH_EDGE_MASK   |	\
	 INTEL_ARCH_INV_MASK    |	\
	 INTEL_ARCH_CNT_MASK)

	return hw_event & CORE_EVNTSEL_MASK;
}

static __initconst u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES]		= 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS]		= 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES]	= 0x0080,
	[PERF_COUNT_HW_CACHE_MISSES]		= 0x0081,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS]	= 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int hw_event)
{
	return amd_perfmon_event_map[hw_event];
}

static u64 amd_pmu_raw_event(u64 hw_event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK	0x000040000ULL
#define K7_EVNTSEL_INV_MASK	0x000800000ULL
#define K7_EVNTSEL_REG_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_EDGE_MASK  |	\
	 K7_EVNTSEL_INV_MASK   |	\
	 K7_EVNTSEL_REG_MASK)

	return hw_event & K7_EVNTSEL_MASK;
}

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_event_update(struct perf_event *event,
			struct hw_perf_event *hwc, int idx)
{
	int shift = 64 - x86_pmu.event_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	if (idx == X86_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->event_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &event->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
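
/*
 * Worked example of the shift trick above (illustrative numbers, assuming
 * 40-bit counters, i.e. shift = 24): with prev = 0xfffffffff0 and
 * new = 0x10, (new << 24) - (prev << 24) evaluates to 0x20000000 as an
 * s64, and arithmetic-shifting that back right by 24 gives 0x20 = 32
 * events - the delta comes out modulo the counter width no matter what
 * garbage or sign-extension the hardware returns above bit 39.
 */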

static atomic_t active_events;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_events; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_events;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_events; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static inline bool bts_available(void)
{
	return x86_pmu.enable_bts != NULL;
}

static inline void init_debug_store_on_cpu(int cpu)
{
	struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

	if (!ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA,
		     (u32)((u64)(unsigned long)ds),
		     (u32)((u64)(unsigned long)ds >> 32));
}

static inline void fini_debug_store_on_cpu(int cpu)
{
	if (!per_cpu(cpu_hw_events, cpu).ds)
		return;

	wrmsr_on_cpu(cpu, MSR_IA32_DS_AREA, 0, 0);
}

static void release_bts_hardware(void)
{
	int cpu;

	if (!bts_available())
		return;

	get_online_cpus();

	for_each_online_cpu(cpu)
		fini_debug_store_on_cpu(cpu);

	for_each_possible_cpu(cpu) {
		struct debug_store *ds = per_cpu(cpu_hw_events, cpu).ds;

		if (!ds)
			continue;

		per_cpu(cpu_hw_events, cpu).ds = NULL;

		kfree((void *)(unsigned long)ds->bts_buffer_base);
		kfree(ds);
	}

	put_online_cpus();
}

static int reserve_bts_hardware(void)
{
	int cpu, err = 0;

	if (!bts_available())
		return 0;

	get_online_cpus();

	for_each_possible_cpu(cpu) {
		struct debug_store *ds;
		void *buffer;

		err = -ENOMEM;
		buffer = kzalloc(BTS_BUFFER_SIZE, GFP_KERNEL);
		if (unlikely(!buffer))
			break;

		ds = kzalloc(sizeof(*ds), GFP_KERNEL);
		if (unlikely(!ds)) {
			kfree(buffer);
			break;
		}

		ds->bts_buffer_base = (u64)(unsigned long)buffer;
		ds->bts_index = ds->bts_buffer_base;
		ds->bts_absolute_maximum =
			ds->bts_buffer_base + BTS_BUFFER_SIZE;
		ds->bts_interrupt_threshold =
			ds->bts_absolute_maximum - BTS_OVFL_TH;

		per_cpu(cpu_hw_events, cpu).ds = ds;
		err = 0;
	}

	if (err)
		release_bts_hardware();
	else {
		for_each_online_cpu(cpu)
			init_debug_store_on_cpu(cpu);
	}

	put_online_cpus();

	return err;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	if (atomic_dec_and_mutex_lock(&active_events, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_bts_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

static void intel_pmu_enable_bts(u64 config)
{
	unsigned long debugctlmsr;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr |= X86_DEBUGCTL_TR;
	debugctlmsr |= X86_DEBUGCTL_BTS;
	debugctlmsr |= X86_DEBUGCTL_BTINT;

	if (!(config & ARCH_PERFMON_EVENTSEL_OS))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_OS;

	if (!(config & ARCH_PERFMON_EVENTSEL_USR))
		debugctlmsr |= X86_DEBUGCTL_BTS_OFF_USR;

	update_debugctlmsr(debugctlmsr);
}

static void intel_pmu_disable_bts(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	unsigned long debugctlmsr;

	if (!cpuc->ds)
		return;

	debugctlmsr = get_debugctlmsr();

	debugctlmsr &=
		~(X86_DEBUGCTL_TR | X86_DEBUGCTL_BTS | X86_DEBUGCTL_BTINT |
		  X86_DEBUGCTL_BTS_OFF_OS | X86_DEBUGCTL_BTS_OFF_USR);

	update_debugctlmsr(debugctlmsr);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_event_init(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_events)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_events) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				err = reserve_bts_hardware();
		}
		if (!err)
			atomic_inc(&active_events);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	event->destroy = hw_perf_event_destroy;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	hwc->idx = -1;
	hwc->last_cpu = -1;
	hwc->last_tag = ~0ULL;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * events (user-space has to fall back and
		 * sample via a hrtimer based software event):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	/*
	 * The raw hw_event type provides the config in the hw_event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if ((attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS) &&
	    (hwc->sample_period == 1)) {
		/* BTS is not supported by this architecture. */
		if (!bts_available())
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
			return -EOPNOTSUPP;
	}

	hwc->config |= config;

	return 0;
}
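
/*
 * Illustrative example of an attribute that takes the branch-tracing path
 * above (field names from the perf ABI, values are only an example):
 *
 *	struct perf_event_attr attr = {
 *		.type		= PERF_TYPE_HARDWARE,
 *		.config		= PERF_COUNT_HW_BRANCH_INSTRUCTIONS,
 *		.sample_period	= 1,
 *		.exclude_kernel	= 1,
 *	};
 *
 * i.e. sampling every single branch, user mode only - anything else keeps
 * using a normal counter for the branch-instructions event.
 */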

static void p6_pmu_disable_all(void)
{
	u64 val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask))
		intel_pmu_disable_bts();
}

static void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu.eventsel + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

void hw_perf_disable(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
	unsigned long val;

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);

	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);

	if (test_bit(X86_PMC_IDX_FIXED_BTS, cpuc->active_mask)) {
		struct perf_event *event =
			cpuc->events[X86_PMC_IDX_FIXED_BTS];

		if (WARN_ON_ONCE(!event))
			return;

		intel_pmu_enable_bts(event->hw.config);
	}
}

static void x86_pmu_enable_all(void)
{
	struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_events; idx++) {
		struct perf_event *event = cpuc->events[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = event->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(x86_pmu.eventsel + idx, val);
	}
}

Stephane Eranian1da53e02010-01-18 10:58:01 +02001316static const struct pmu pmu;
1317
1318static inline int is_x86_event(struct perf_event *event)
1319{
1320 return event->pmu == &pmu;
1321}
1322
1323static int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
1324{
Peter Zijlstra63b14642010-01-22 16:32:17 +01001325 struct event_constraint *c, *constraints[X86_PMC_IDX_MAX];
Stephane Eranian1da53e02010-01-18 10:58:01 +02001326 unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
Peter Zijlstrac933c1a2010-01-22 16:40:12 +01001327 int i, j, w, wmax, num = 0;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001328 struct hw_perf_event *hwc;
1329
1330 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1331
1332 for (i = 0; i < n; i++) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01001333 constraints[i] =
1334 x86_pmu.get_event_constraints(cpuc, cpuc->event_list[i]);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001335 }
1336
1337 /*
Stephane Eranian81130702010-01-21 17:39:01 +02001338	 * fast path: try to reuse the previous register assignments
1339 */
Peter Zijlstrac933c1a2010-01-22 16:40:12 +01001340 for (i = 0; i < n; i++) {
Stephane Eranian81130702010-01-21 17:39:01 +02001341 hwc = &cpuc->event_list[i]->hw;
Peter Zijlstra81269a02010-01-22 14:55:22 +01001342 c = constraints[i];
Stephane Eranian81130702010-01-21 17:39:01 +02001343
1344 /* never assigned */
1345 if (hwc->idx == -1)
1346 break;
1347
1348 /* constraint still honored */
Peter Zijlstra63b14642010-01-22 16:32:17 +01001349 if (!test_bit(hwc->idx, c->idxmsk))
Stephane Eranian81130702010-01-21 17:39:01 +02001350 break;
1351
1352 /* not already used */
1353 if (test_bit(hwc->idx, used_mask))
1354 break;
1355
Stephane Eranian81130702010-01-21 17:39:01 +02001356 set_bit(hwc->idx, used_mask);
1357 if (assign)
1358 assign[i] = hwc->idx;
1359 }
Peter Zijlstrac933c1a2010-01-22 16:40:12 +01001360 if (i == n)
Stephane Eranian81130702010-01-21 17:39:01 +02001361 goto done;
1362
1363 /*
1364 * begin slow path
1365 */
1366
1367 bitmap_zero(used_mask, X86_PMC_IDX_MAX);
1368
1369 /*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001370 * weight = number of possible counters
1371 *
1372 * 1 = most constrained, only works on one counter
1373 * wmax = least constrained, works on any counter
1374 *
1375 * assign events to counters starting with most
1376 * constrained events.
1377 */
1378 wmax = x86_pmu.num_events;
1379
1380 /*
1381 * when fixed event counters are present,
1382 * wmax is incremented by 1 to account
1383 * for one more choice
1384 */
1385 if (x86_pmu.num_events_fixed)
1386 wmax++;
1387
Stephane Eranian81130702010-01-21 17:39:01 +02001388 for (w = 1, num = n; num && w <= wmax; w++) {
Stephane Eranian1da53e02010-01-18 10:58:01 +02001389 /* for each event */
Stephane Eranian81130702010-01-21 17:39:01 +02001390 for (i = 0; num && i < n; i++) {
Peter Zijlstra81269a02010-01-22 14:55:22 +01001391 c = constraints[i];
Stephane Eranian1da53e02010-01-18 10:58:01 +02001392 hwc = &cpuc->event_list[i]->hw;
1393
Peter Zijlstra272d30b2010-01-22 16:32:17 +01001394 if (c->weight != w)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001395 continue;
1396
Peter Zijlstra63b14642010-01-22 16:32:17 +01001397 for_each_bit(j, c->idxmsk, X86_PMC_IDX_MAX) {
Stephane Eranian1da53e02010-01-18 10:58:01 +02001398 if (!test_bit(j, used_mask))
1399 break;
1400 }
1401
1402 if (j == X86_PMC_IDX_MAX)
1403 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001404
Stephane Eranian81130702010-01-21 17:39:01 +02001405 set_bit(j, used_mask);
1406
Stephane Eranian1da53e02010-01-18 10:58:01 +02001407 if (assign)
1408 assign[i] = j;
1409 num--;
1410 }
1411 }
Stephane Eranian81130702010-01-21 17:39:01 +02001412done:
Stephane Eranian1da53e02010-01-18 10:58:01 +02001413 /*
1414 * scheduling failed or is just a simulation,
1415 * free resources if necessary
1416 */
1417 if (!assign || num) {
1418 for (i = 0; i < n; i++) {
1419 if (x86_pmu.put_event_constraints)
1420 x86_pmu.put_event_constraints(cpuc, cpuc->event_list[i]);
1421 }
1422 }
1423 return num ? -ENOSPC : 0;
1424}
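/*
 * Illustrative sketch (not part of the original sources): the slow path
 * above is a greedy, weight-ordered assignment.  With two generic
 * counters, an event constrained to counter 0 only (weight 1) is placed
 * before an unconstrained one (weight 2), e.g.:
 *
 *	event A: idxmsk = 0x1 -> weight 1, takes counter 0 first
 *	event B: idxmsk = 0x3 -> weight 2, then takes counter 1
 *
 * Scheduling fails with -ENOSPC only if some event finds every counter
 * in its idxmsk already set in used_mask.
 */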
1425
1426/*
1427 * dogrp: true if we must also collect the sibling events (whole group)
1428 * returns the total number of events collected, or a negative error code
1429 */
1430static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
1431{
1432 struct perf_event *event;
1433 int n, max_count;
1434
1435 max_count = x86_pmu.num_events + x86_pmu.num_events_fixed;
1436
1437 /* current number of events already accepted */
1438 n = cpuc->n_events;
1439
1440 if (is_x86_event(leader)) {
1441 if (n >= max_count)
1442 return -ENOSPC;
1443 cpuc->event_list[n] = leader;
1444 n++;
1445 }
1446 if (!dogrp)
1447 return n;
1448
1449 list_for_each_entry(event, &leader->sibling_list, group_entry) {
1450 if (!is_x86_event(event) ||
Stephane Eranian81130702010-01-21 17:39:01 +02001451 event->state <= PERF_EVENT_STATE_OFF)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001452 continue;
1453
1454 if (n >= max_count)
1455 return -ENOSPC;
1456
1457 cpuc->event_list[n] = event;
1458 n++;
1459 }
1460 return n;
1461}
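/*
 * Note (added for clarity): max_count is the sum of generic and fixed
 * counters, so on a hypothetical CPU with 4 generic and 3 fixed counters
 * at most 7 x86 events can be collected for one context; the next
 * attempt returns -ENOSPC before any scheduling is tried.
 */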
1462
Stephane Eranian1da53e02010-01-18 10:58:01 +02001463static inline void x86_assign_hw_event(struct perf_event *event,
Stephane Eranian447a1942010-02-01 14:50:01 +02001464 struct cpu_hw_events *cpuc, int i)
Stephane Eranian1da53e02010-01-18 10:58:01 +02001465{
Stephane Eranian447a1942010-02-01 14:50:01 +02001466 struct hw_perf_event *hwc = &event->hw;
1467
1468 hwc->idx = cpuc->assign[i];
1469 hwc->last_cpu = smp_processor_id();
1470 hwc->last_tag = ++cpuc->tags[i];
Stephane Eranian1da53e02010-01-18 10:58:01 +02001471
1472 if (hwc->idx == X86_PMC_IDX_FIXED_BTS) {
1473 hwc->config_base = 0;
1474 hwc->event_base = 0;
1475 } else if (hwc->idx >= X86_PMC_IDX_FIXED) {
1476 hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
1477 /*
1478 * We set it so that event_base + idx in wrmsr/rdmsr maps to
1479 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
1480 */
1481 hwc->event_base =
1482 MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
1483 } else {
1484 hwc->config_base = x86_pmu.eventsel;
1485 hwc->event_base = x86_pmu.perfctr;
1486 }
1487}
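/*
 * Worked example (illustrative): for a fixed counter with
 * idx == X86_PMC_IDX_FIXED + 1, event_base + idx evaluates to
 * MSR_ARCH_PERFMON_FIXED_CTR0 + 1, i.e. MSR_ARCH_PERFMON_FIXED_CTR1,
 * which is exactly what the rdmsr/wrmsr users of event_base expect.
 */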
1488
Stephane Eranian447a1942010-02-01 14:50:01 +02001489static inline int match_prev_assignment(struct hw_perf_event *hwc,
1490 struct cpu_hw_events *cpuc,
1491 int i)
1492{
1493 return hwc->idx == cpuc->assign[i] &&
1494 hwc->last_cpu == smp_processor_id() &&
1495 hwc->last_tag == cpuc->tags[i];
1496}
1497
Stephane Eraniand76a0812010-02-08 17:06:01 +02001498static void x86_pmu_stop(struct perf_event *event);
Peter Zijlstra2e841872010-01-25 15:58:43 +01001499
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001500void hw_perf_enable(void)
Ingo Molnaree060942008-12-13 09:00:03 +01001501{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001502 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1503 struct perf_event *event;
1504 struct hw_perf_event *hwc;
1505 int i;
1506
Robert Richter85cf9db2009-04-29 12:47:20 +02001507 if (!x86_pmu_initialized())
Ingo Molnar2b9ff0d2008-12-14 18:36:30 +01001508 return;
Peter Zijlstra1a6e21f2010-01-27 23:07:47 +01001509
1510 if (cpuc->enabled)
1511 return;
1512
Stephane Eranian1da53e02010-01-18 10:58:01 +02001513 if (cpuc->n_added) {
1514 /*
1515 * apply assignment obtained either from
1516 * hw_perf_group_sched_in() or x86_pmu_enable()
1517 *
1518 * step1: save events moving to new counters
1519 * step2: reprogram moved events into new counters
1520 */
1521 for (i = 0; i < cpuc->n_events; i++) {
1522
1523 event = cpuc->event_list[i];
1524 hwc = &event->hw;
1525
Stephane Eranian447a1942010-02-01 14:50:01 +02001526 /*
1527	 * we can avoid reprogramming the counter if:
1528 * - assigned same counter as last time
1529 * - running on same CPU as last time
1530 * - no other event has used the counter since
1531 */
1532 if (hwc->idx == -1 ||
1533 match_prev_assignment(hwc, cpuc, i))
Stephane Eranian1da53e02010-01-18 10:58:01 +02001534 continue;
1535
Stephane Eraniand76a0812010-02-08 17:06:01 +02001536 x86_pmu_stop(event);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001537
1538 hwc->idx = -1;
1539 }
1540
1541 for (i = 0; i < cpuc->n_events; i++) {
1542
1543 event = cpuc->event_list[i];
1544 hwc = &event->hw;
1545
1546 if (hwc->idx == -1) {
Stephane Eranian447a1942010-02-01 14:50:01 +02001547 x86_assign_hw_event(event, cpuc, i);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001548 x86_perf_event_set_period(event, hwc, hwc->idx);
1549 }
1550 /*
1551 * need to mark as active because x86_pmu_disable()
Stephane Eranian447a1942010-02-01 14:50:01 +02001552	 * clears active_mask and events[] yet preserves
Stephane Eranian1da53e02010-01-18 10:58:01 +02001553 * idx
1554 */
1555 set_bit(hwc->idx, cpuc->active_mask);
1556 cpuc->events[hwc->idx] = event;
1557
1558 x86_pmu.enable(hwc, hwc->idx);
1559 perf_event_update_userpage(event);
1560 }
1561 cpuc->n_added = 0;
1562 perf_events_lapic_init();
1563 }
Peter Zijlstra1a6e21f2010-01-27 23:07:47 +01001564
1565 cpuc->enabled = 1;
1566 barrier();
1567
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001568 x86_pmu.enable_all();
Ingo Molnaree060942008-12-13 09:00:03 +01001569}
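/*
 * Summary (added for clarity): hw_perf_enable() applies a previously
 * computed assignment in two passes - first stop every event whose
 * counter changed (step1), then (re)program each unassigned event on its
 * new counter (step2) - before setting the global enable bits via
 * x86_pmu.enable_all().
 */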
Ingo Molnaree060942008-12-13 09:00:03 +01001570
Robert Richter19d84da2009-04-29 12:47:25 +02001571static inline u64 intel_pmu_get_status(void)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001572{
1573 u64 status;
1574
1575 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1576
1577 return status;
1578}
1579
Robert Richterdee5d902009-04-29 12:47:07 +02001580static inline void intel_pmu_ack_status(u64 ack)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001581{
1582 wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
1583}
1584
Peter Zijlstra8c48e442010-01-29 13:25:31 +01001585static inline void __x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001586{
Vince Weaver11d15782009-07-08 17:46:14 -04001587 (void)checking_wrmsrl(hwc->config_base + idx,
Robert Richter7c90cc42009-04-29 12:47:18 +02001588 hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001589}
1590
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001591static inline void x86_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001592{
Vince Weaver11d15782009-07-08 17:46:14 -04001593 (void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001594}
1595
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001596static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001597intel_pmu_disable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001598{
1599 int idx = __idx - X86_PMC_IDX_FIXED;
1600 u64 ctrl_val, mask;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001601
1602 mask = 0xfULL << (idx * 4);
1603
1604 rdmsrl(hwc->config_base, ctrl_val);
1605 ctrl_val &= ~mask;
Vince Weaver11d15782009-07-08 17:46:14 -04001606 (void)checking_wrmsrl(hwc->config_base, ctrl_val);
1607}
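/*
 * Note (added for clarity): MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one
 * 4-bit control field per fixed counter, so disabling fixed counter 2
 * amounts to ctrl_val &= ~(0xfULL << 8) while the fields of the other
 * fixed counters stay untouched.
 */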
1608
1609static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001610p6_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001611{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001612 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1613 u64 val = P6_NOP_EVENT;
Vince Weaver11d15782009-07-08 17:46:14 -04001614
Peter Zijlstra9c74fb52009-07-08 10:21:41 +02001615 if (cpuc->enabled)
1616 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
Vince Weaver11d15782009-07-08 17:46:14 -04001617
1618 (void)checking_wrmsrl(hwc->config_base + idx, val);
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001619}
1620
1621static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001622intel_pmu_disable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001623{
Markus Metzger30dd5682009-07-21 15:56:48 +02001624 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
1625 intel_pmu_disable_bts();
1626 return;
1627 }
1628
Robert Richterd4369892009-04-29 12:47:19 +02001629 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1630 intel_pmu_disable_fixed(hwc, idx);
1631 return;
1632 }
1633
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001634 x86_pmu_disable_event(hwc, idx);
Robert Richterd4369892009-04-29 12:47:19 +02001635}
1636
Tejun Heo245b2e72009-06-24 15:13:48 +09001637static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001638
Ingo Molnaree060942008-12-13 09:00:03 +01001639/*
1640 * Set the next IRQ period, based on the hwc->period_left value.
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001641 * To be called with the event disabled in hw:
Ingo Molnaree060942008-12-13 09:00:03 +01001642 */
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001643static int
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001644x86_perf_event_set_period(struct perf_event *event,
1645 struct hw_perf_event *hwc, int idx)
Ingo Molnar241771e2008-12-03 10:39:53 +01001646{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001647 s64 left = atomic64_read(&hwc->period_left);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001648 s64 period = hwc->sample_period;
1649 int err, ret = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001650
Markus Metzger30dd5682009-07-21 15:56:48 +02001651 if (idx == X86_PMC_IDX_FIXED_BTS)
1652 return 0;
1653
Ingo Molnaree060942008-12-13 09:00:03 +01001654 /*
André Goddard Rosaaf901ca2009-11-14 13:09:05 -02001655 * If we are way outside a reasonable range then just skip forward:
Ingo Molnaree060942008-12-13 09:00:03 +01001656 */
1657 if (unlikely(left <= -period)) {
1658 left = period;
1659 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001660 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001661 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001662 }
1663
1664 if (unlikely(left <= 0)) {
1665 left += period;
1666 atomic64_set(&hwc->period_left, left);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001667 hwc->last_period = period;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001668 ret = 1;
Ingo Molnaree060942008-12-13 09:00:03 +01001669 }
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001670 /*
Ingo Molnardfc65092009-09-21 11:31:35 +02001671	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
Ingo Molnar1c80f4b2009-05-15 08:25:22 +02001672 */
1673 if (unlikely(left < 2))
1674 left = 2;
Ingo Molnaree060942008-12-13 09:00:03 +01001675
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001676 if (left > x86_pmu.max_period)
1677 left = x86_pmu.max_period;
1678
Tejun Heo245b2e72009-06-24 15:13:48 +09001679 per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;
Ingo Molnaree060942008-12-13 09:00:03 +01001680
1681 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001682 * The hw event starts counting from this event offset,
Ingo Molnaree060942008-12-13 09:00:03 +01001683	 * mark it to be able to extract future deltas:
1684 */
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001685 atomic64_set(&hwc->prev_count, (u64)-left);
Ingo Molnaree060942008-12-13 09:00:03 +01001686
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001687 err = checking_wrmsrl(hwc->event_base + idx,
1688 (u64)(-left) & x86_pmu.event_mask);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001689
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001690 perf_event_update_userpage(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001691
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001692 return ret;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001693}
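/*
 * Worked example (illustrative): with a sample_period of 100000 the
 * counter is programmed with (u64)-100000 (masked to event_mask), so it
 * overflows and raises the PMI after exactly 100000 increments;
 * x86_perf_event_update() later folds the accumulated delta back into
 * the event count.
 */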
1694
1695static inline void
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001696intel_pmu_enable_fixed(struct hw_perf_event *hwc, int __idx)
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001697{
1698 int idx = __idx - X86_PMC_IDX_FIXED;
1699 u64 ctrl_val, bits, mask;
1700 int err;
1701
1702 /*
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001703 * Enable IRQ generation (0x8),
1704 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
1705 * if requested:
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001706 */
Paul Mackerras0475f9e2009-02-11 14:35:35 +11001707 bits = 0x8ULL;
1708 if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
1709 bits |= 0x2;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001710 if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
1711 bits |= 0x1;
Stephane Eranianb27d5152010-01-18 10:58:01 +02001712
1713 /*
1714 * ANY bit is supported in v3 and up
1715 */
1716 if (x86_pmu.version > 2 && hwc->config & ARCH_PERFMON_EVENTSEL_ANY)
1717 bits |= 0x4;
1718
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001719 bits <<= (idx * 4);
1720 mask = 0xfULL << (idx * 4);
1721
1722 rdmsrl(hwc->config_base, ctrl_val);
1723 ctrl_val &= ~mask;
1724 ctrl_val |= bits;
1725 err = checking_wrmsrl(hwc->config_base, ctrl_val);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001726}
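/*
 * Worked example (illustrative): for fixed counter 1 counting in both
 * rings with PMI enabled, bits = 0x8 | 0x2 | 0x1 = 0xb and the update
 * amounts to ctrl_val = (ctrl_val & ~(0xfULL << 4)) | (0xbULL << 4).
 */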
1727
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001728static void p6_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Vince Weaver11d15782009-07-08 17:46:14 -04001729{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001730 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Peter Zijlstra984b8382009-07-10 09:59:56 +02001731 u64 val;
Vince Weaver11d15782009-07-08 17:46:14 -04001732
Peter Zijlstra984b8382009-07-10 09:59:56 +02001733 val = hwc->config;
Vince Weaver11d15782009-07-08 17:46:14 -04001734 if (cpuc->enabled)
Peter Zijlstra984b8382009-07-10 09:59:56 +02001735 val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
1736
1737 (void)checking_wrmsrl(hwc->config_base + idx, val);
Vince Weaver11d15782009-07-08 17:46:14 -04001738}
1739
1740
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001741static void intel_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001742{
Markus Metzger30dd5682009-07-21 15:56:48 +02001743 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS)) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001744 if (!__get_cpu_var(cpu_hw_events).enabled)
Markus Metzger30dd5682009-07-21 15:56:48 +02001745 return;
1746
1747 intel_pmu_enable_bts(hwc->config);
1748 return;
1749 }
1750
Robert Richter7c90cc42009-04-29 12:47:18 +02001751 if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
1752 intel_pmu_enable_fixed(hwc, idx);
1753 return;
1754 }
1755
Peter Zijlstra8c48e442010-01-29 13:25:31 +01001756 __x86_pmu_enable_event(hwc, idx);
Robert Richter7c90cc42009-04-29 12:47:18 +02001757}
1758
Peter Zijlstra8c48e442010-01-29 13:25:31 +01001759static void x86_pmu_enable_event(struct hw_perf_event *hwc, int idx)
Robert Richter7c90cc42009-04-29 12:47:18 +02001760{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001761 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richter7c90cc42009-04-29 12:47:18 +02001762 if (cpuc->enabled)
Peter Zijlstra8c48e442010-01-29 13:25:31 +01001763 __x86_pmu_enable_event(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001764}
1765
Ingo Molnaree060942008-12-13 09:00:03 +01001766/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001767 * activate a single event
1768 *
1769 * The event is added to the group of enabled events
1770 * but only if it can be scheduled with the existing events.
1771 *
1772 * Called with the PMU disabled. If scheduling succeeds, the caller is
1773 * then guaranteed to call perf_enable() and hw_perf_enable().
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001774 */
1775static int x86_pmu_enable(struct perf_event *event)
1776{
1777 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001778 struct hw_perf_event *hwc;
1779 int assign[X86_PMC_IDX_MAX];
1780 int n, n0, ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001781
Stephane Eranian1da53e02010-01-18 10:58:01 +02001782 hwc = &event->hw;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001783
Stephane Eranian1da53e02010-01-18 10:58:01 +02001784 n0 = cpuc->n_events;
1785 n = collect_events(cpuc, event, false);
1786 if (n < 0)
1787 return n;
Ingo Molnar53b441a2009-05-25 21:41:28 +02001788
Stephane Eranian1da53e02010-01-18 10:58:01 +02001789 ret = x86_schedule_events(cpuc, n, assign);
1790 if (ret)
1791 return ret;
1792 /*
1793 * copy new assignment, now we know it is possible
1794 * will be used by hw_perf_enable()
1795 */
1796 memcpy(cpuc->assign, assign, n*sizeof(int));
Ingo Molnar241771e2008-12-03 10:39:53 +01001797
Stephane Eranian1da53e02010-01-18 10:58:01 +02001798 cpuc->n_events = n;
1799 cpuc->n_added = n - n0;
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001800
Ingo Molnar95cdd2e2008-12-21 13:50:42 +01001801 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001802}
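/*
 * Flow note (added for clarity): x86_pmu_enable() only collects the event
 * and dry-runs the scheduler; the assignment is committed to cpuc->assign
 * here but the hardware is not touched until hw_perf_enable() runs with
 * the complete set of events.
 */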
1803
Stephane Eraniand76a0812010-02-08 17:06:01 +02001804static int x86_pmu_start(struct perf_event *event)
1805{
1806 struct hw_perf_event *hwc = &event->hw;
1807
1808 if (hwc->idx == -1)
1809 return -EAGAIN;
1810
1811 x86_perf_event_set_period(event, hwc, hwc->idx);
1812 x86_pmu.enable(hwc, hwc->idx);
1813
1814 return 0;
1815}
1816
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001817static void x86_pmu_unthrottle(struct perf_event *event)
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001818{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001819 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1820 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001821
1822 if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001823 cpuc->events[hwc->idx] != event))
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001824 return;
1825
1826 x86_pmu.enable(hwc, hwc->idx);
1827}
1828
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001829void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001830{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001831 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001832 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001833 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001834 int cpu, idx;
1835
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001836 if (!x86_pmu.num_events)
Ingo Molnar1e125672008-12-09 12:18:18 +01001837 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001838
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001839 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001840
1841 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001842 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001843
Robert Richterfaa28ae2009-04-29 12:47:13 +02001844 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301845 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1846 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1847 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1848 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001849
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301850 pr_info("\n");
1851 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1852 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1853 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1854 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301855 }
Stephane Eranian1da53e02010-01-18 10:58:01 +02001856 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001857
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001858 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter4a06bd82009-04-29 12:47:11 +02001859 rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
1860 rdmsrl(x86_pmu.perfctr + idx, pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001861
Tejun Heo245b2e72009-06-24 15:13:48 +09001862 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001863
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301864 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001865 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301866 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001867 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301868 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001869 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001870 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001871 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001872 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1873
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301874 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001875 cpu, idx, pmc_count);
1876 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001877 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001878}
1879
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001880static void intel_pmu_drain_bts_buffer(struct cpu_hw_events *cpuc)
Markus Metzger30dd5682009-07-21 15:56:48 +02001881{
1882 struct debug_store *ds = cpuc->ds;
1883 struct bts_record {
1884 u64 from;
1885 u64 to;
1886 u64 flags;
1887 };
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001888 struct perf_event *event = cpuc->events[X86_PMC_IDX_FIXED_BTS];
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001889 struct bts_record *at, *top;
Markus Metzger5622f292009-09-15 13:00:23 +02001890 struct perf_output_handle handle;
1891 struct perf_event_header header;
1892 struct perf_sample_data data;
1893 struct pt_regs regs;
Markus Metzger30dd5682009-07-21 15:56:48 +02001894
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001895 if (!event)
Markus Metzger30dd5682009-07-21 15:56:48 +02001896 return;
1897
1898 if (!ds)
1899 return;
1900
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001901 at = (struct bts_record *)(unsigned long)ds->bts_buffer_base;
1902 top = (struct bts_record *)(unsigned long)ds->bts_index;
Markus Metzger30dd5682009-07-21 15:56:48 +02001903
Markus Metzger5622f292009-09-15 13:00:23 +02001904 if (top <= at)
1905 return;
1906
markus.t.metzger@intel.com596da172009-09-02 16:04:47 +02001907 ds->bts_index = ds->bts_buffer_base;
1908
Markus Metzger30dd5682009-07-21 15:56:48 +02001909
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001910 data.period = event->hw.last_period;
Markus Metzger5622f292009-09-15 13:00:23 +02001911 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08001912 data.raw = NULL;
Markus Metzger5622f292009-09-15 13:00:23 +02001913 regs.ip = 0;
1914
1915 /*
1916 * Prepare a generic sample, i.e. fill in the invariant fields.
1917 * We will overwrite the from and to address before we output
1918 * the sample.
1919 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001920 perf_prepare_sample(&header, &data, event, &regs);
Markus Metzger5622f292009-09-15 13:00:23 +02001921
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001922 if (perf_output_begin(&handle, event,
Markus Metzger5622f292009-09-15 13:00:23 +02001923 header.size * (top - at), 1, 1))
1924 return;
1925
1926 for (; at < top; at++) {
1927 data.ip = at->from;
1928 data.addr = at->to;
1929
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001930 perf_output_sample(&handle, &header, &data, event);
Markus Metzger30dd5682009-07-21 15:56:48 +02001931 }
1932
Markus Metzger5622f292009-09-15 13:00:23 +02001933 perf_output_end(&handle);
Markus Metzger30dd5682009-07-21 15:56:48 +02001934
1935 /* There's new data available. */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001936 event->hw.interrupts++;
1937 event->pending_kill = POLL_IN;
Markus Metzger30dd5682009-07-21 15:56:48 +02001938}
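/*
 * Note (added for clarity): each BTS record holds a branch 'from', a
 * branch 'to' and a flags word; the single perf_output_begin() above
 * reserves header.size * (top - at) bytes so that every hardware record
 * becomes one sample with ip = from and addr = to.
 */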
1939
Stephane Eraniand76a0812010-02-08 17:06:01 +02001940static void x86_pmu_stop(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001941{
Stephane Eraniand76a0812010-02-08 17:06:01 +02001942 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001943 struct hw_perf_event *hwc = &event->hw;
Peter Zijlstra2e841872010-01-25 15:58:43 +01001944 int idx = hwc->idx;
Ingo Molnar241771e2008-12-03 10:39:53 +01001945
Robert Richter09534232009-04-29 12:47:16 +02001946 /*
1947 * Must be done before we disable, otherwise the nmi handler
1948 * could reenable again:
1949 */
Robert Richter43f62012009-04-29 16:55:56 +02001950 clear_bit(idx, cpuc->active_mask);
Robert Richterd4369892009-04-29 12:47:19 +02001951 x86_pmu.disable(hwc, idx);
Ingo Molnar241771e2008-12-03 10:39:53 +01001952
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001953 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001954	 * Drain the remaining delta count out of an event
Ingo Molnaree060942008-12-13 09:00:03 +01001955 * that we are disabling:
1956 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001957 x86_perf_event_update(event, hwc, idx);
Markus Metzger30dd5682009-07-21 15:56:48 +02001958
1959 /* Drain the remaining BTS records. */
Markus Metzger5622f292009-09-15 13:00:23 +02001960 if (unlikely(idx == X86_PMC_IDX_FIXED_BTS))
1961 intel_pmu_drain_bts_buffer(cpuc);
Markus Metzger30dd5682009-07-21 15:56:48 +02001962
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001963 cpuc->events[idx] = NULL;
Peter Zijlstra2e841872010-01-25 15:58:43 +01001964}
1965
1966static void x86_pmu_disable(struct perf_event *event)
1967{
1968 struct cpu_hw_events *cpuc = &__get_cpu_var(cpu_hw_events);
1969 int i;
1970
Stephane Eraniand76a0812010-02-08 17:06:01 +02001971 x86_pmu_stop(event);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001972
Stephane Eranian1da53e02010-01-18 10:58:01 +02001973 for (i = 0; i < cpuc->n_events; i++) {
1974 if (event == cpuc->event_list[i]) {
1975
1976 if (x86_pmu.put_event_constraints)
1977 x86_pmu.put_event_constraints(cpuc, event);
1978
1979 while (++i < cpuc->n_events)
1980 cpuc->event_list[i-1] = cpuc->event_list[i];
1981
1982 --cpuc->n_events;
Peter Zijlstra6c9687a2010-01-25 11:57:25 +01001983 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001984 }
1985 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001986 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001987}
1988
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001989/*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001990 * Save and restart an expired event. Called by NMI contexts,
1991 * so it has to be careful about preempting normal event ops:
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001992 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001993static int intel_pmu_save_and_restart(struct perf_event *event)
Ingo Molnar241771e2008-12-03 10:39:53 +01001994{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001995 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001996 int idx = hwc->idx;
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001997 int ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001998
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001999 x86_perf_event_update(event, hwc, idx);
2000 ret = x86_perf_event_set_period(event, hwc, idx);
Ingo Molnar7e2ae342008-12-09 11:40:46 +01002001
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002002 if (event->state == PERF_EVENT_STATE_ACTIVE)
2003 intel_pmu_enable_event(hwc, idx);
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002004
2005 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01002006}
2007
Ingo Molnaraaba9802009-05-26 08:10:00 +02002008static void intel_pmu_reset(void)
2009{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002010 struct debug_store *ds = __get_cpu_var(cpu_hw_events).ds;
Ingo Molnaraaba9802009-05-26 08:10:00 +02002011 unsigned long flags;
2012 int idx;
2013
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002014 if (!x86_pmu.num_events)
Ingo Molnaraaba9802009-05-26 08:10:00 +02002015 return;
2016
2017 local_irq_save(flags);
2018
2019 printk("clearing PMU state on CPU#%d\n", smp_processor_id());
2020
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002021 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02002022 checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
2023 checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
2024 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002025 for (idx = 0; idx < x86_pmu.num_events_fixed; idx++) {
Ingo Molnaraaba9802009-05-26 08:10:00 +02002026 checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
2027 }
Markus Metzger30dd5682009-07-21 15:56:48 +02002028 if (ds)
2029 ds->bts_index = ds->bts_buffer_base;
Ingo Molnaraaba9802009-05-26 08:10:00 +02002030
2031 local_irq_restore(flags);
2032}
2033
Ingo Molnar241771e2008-12-03 10:39:53 +01002034/*
2035 * This handler is triggered by the local APIC, so the APIC IRQ handling
2036 * rules apply:
2037 */
Yong Wanga3288102009-06-03 13:12:55 +08002038static int intel_pmu_handle_irq(struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01002039{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002040 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002041 struct cpu_hw_events *cpuc;
Vince Weaver11d15782009-07-08 17:46:14 -04002042 int bit, loops;
Mike Galbraith4b39fd92009-01-23 14:36:16 +01002043 u64 ack, status;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002044
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002045 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08002046 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002047
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002048 cpuc = &__get_cpu_var(cpu_hw_events);
Ingo Molnar43874d22008-12-09 12:23:59 +01002049
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002050 perf_disable();
Markus Metzger5622f292009-09-15 13:00:23 +02002051 intel_pmu_drain_bts_buffer(cpuc);
Robert Richter19d84da2009-04-29 12:47:25 +02002052 status = intel_pmu_get_status();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002053 if (!status) {
2054 perf_enable();
2055 return 0;
2056 }
Ingo Molnar87b9cf42008-12-08 14:20:16 +01002057
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002058 loops = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01002059again:
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002060 if (++loops > 100) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002061 WARN_ONCE(1, "perfevents: irq loop stuck!\n");
2062 perf_event_print_debug();
Ingo Molnaraaba9802009-05-26 08:10:00 +02002063 intel_pmu_reset();
2064 perf_enable();
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002065 return 1;
2066 }
2067
Mike Galbraithd278c482009-02-09 07:38:50 +01002068 inc_irq_stat(apic_perf_irqs);
Ingo Molnar241771e2008-12-03 10:39:53 +01002069 ack = status;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01002070 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002071 struct perf_event *event = cpuc->events[bit];
Ingo Molnar241771e2008-12-03 10:39:53 +01002072
2073 clear_bit(bit, (unsigned long *) &status);
Robert Richter43f62012009-04-29 16:55:56 +02002074 if (!test_bit(bit, cpuc->active_mask))
Ingo Molnar241771e2008-12-03 10:39:53 +01002075 continue;
2076
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002077 if (!intel_pmu_save_and_restart(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002078 continue;
2079
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002080 data.period = event->hw.last_period;
Peter Zijlstra60f916d2009-06-15 19:00:20 +02002081
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002082 if (perf_event_overflow(event, 1, &data, regs))
2083 intel_pmu_disable_event(&event->hw, bit);
Ingo Molnar241771e2008-12-03 10:39:53 +01002084 }
2085
Robert Richterdee5d902009-04-29 12:47:07 +02002086 intel_pmu_ack_status(ack);
Ingo Molnar241771e2008-12-03 10:39:53 +01002087
2088 /*
2089 * Repeat if there is more work to be done:
2090 */
Robert Richter19d84da2009-04-29 12:47:25 +02002091 status = intel_pmu_get_status();
Ingo Molnar241771e2008-12-03 10:39:53 +01002092 if (status)
2093 goto again;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002094
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002095 perf_enable();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002096
2097 return 1;
Mike Galbraith1b023a92009-01-23 10:13:01 +01002098}
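/*
 * Note (added for clarity): the handler loops on GLOBAL_STATUS because
 * new overflows may be raised while earlier ones are being acked; the
 * 100-iteration guard above resets the PMU instead of letting a stuck
 * status bit wedge the NMI path forever.
 */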
2099
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002100static int x86_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02002101{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002102 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002103 struct cpu_hw_events *cpuc;
2104 struct perf_event *event;
2105 struct hw_perf_event *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04002106 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02002107 u64 val;
2108
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002109 data.addr = 0;
Xiao Guangrong5e855db2009-12-10 17:08:54 +08002110 data.raw = NULL;
Peter Zijlstradf1a1322009-06-10 21:02:22 +02002111
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002112 cpuc = &__get_cpu_var(cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002113
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002114 for (idx = 0; idx < x86_pmu.num_events; idx++) {
Robert Richter43f62012009-04-29 16:55:56 +02002115 if (!test_bit(idx, cpuc->active_mask))
Robert Richtera29aa8a2009-04-29 12:47:21 +02002116 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002117
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002118 event = cpuc->events[idx];
2119 hwc = &event->hw;
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002120
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002121 val = x86_perf_event_update(event, hwc, idx);
2122 if (val & (1ULL << (x86_pmu.event_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02002123 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002124
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002125 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002126 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002127 */
2128 handled = 1;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002129 data.period = event->hw.last_period;
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002130
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002131 if (!x86_perf_event_set_period(event, hwc, idx))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02002132 continue;
2133
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002134 if (perf_event_overflow(event, 1, &data, regs))
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002135 x86_pmu.disable(hwc, idx);
Robert Richtera29aa8a2009-04-29 12:47:21 +02002136 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02002137
Peter Zijlstra9e350de2009-06-10 21:34:59 +02002138 if (handled)
2139 inc_irq_stat(apic_perf_irqs);
2140
Robert Richtera29aa8a2009-04-29 12:47:21 +02002141 return handled;
2142}
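/*
 * Note (added for clarity): without a global status register the generic
 * handler polls every active counter; since counters are programmed with
 * a negative value, a still-set sign bit (bit event_bits - 1) means the
 * counter has not overflowed yet and the event is skipped.
 */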
Robert Richter39d81ea2009-04-29 12:47:05 +02002143
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002144void smp_perf_pending_interrupt(struct pt_regs *regs)
2145{
2146 irq_enter();
2147 ack_APIC_irq();
2148 inc_irq_stat(apic_pending_irqs);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002149 perf_event_do_pending();
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002150 irq_exit();
2151}
2152
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002153void set_perf_event_pending(void)
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002154{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002155#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstra7d428962009-09-23 11:03:37 +02002156 if (!x86_pmu.apic || !x86_pmu_initialized())
2157 return;
2158
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002159 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002160#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02002161}
2162
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002163void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002164{
Ingo Molnar04da8a42009-08-11 10:40:08 +02002165#ifdef CONFIG_X86_LOCAL_APIC
2166 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01002167 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02002168
Ingo Molnar241771e2008-12-03 10:39:53 +01002169 /*
Yong Wangc323d952009-05-29 13:28:35 +08002170 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01002171 */
Yong Wangc323d952009-05-29 13:28:35 +08002172 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002173#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01002174}
2175
2176static int __kprobes
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002177perf_event_nmi_handler(struct notifier_block *self,
Ingo Molnar241771e2008-12-03 10:39:53 +01002178 unsigned long cmd, void *__args)
2179{
2180 struct die_args *args = __args;
2181 struct pt_regs *regs;
2182
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002183 if (!atomic_read(&active_events))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02002184 return NOTIFY_DONE;
2185
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002186 switch (cmd) {
2187 case DIE_NMI:
2188 case DIE_NMI_IPI:
2189 break;
2190
2191 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01002192 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01002193 }
Ingo Molnar241771e2008-12-03 10:39:53 +01002194
2195 regs = args->regs;
2196
Ingo Molnar04da8a42009-08-11 10:40:08 +02002197#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01002198 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02002199#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002200 /*
2201	 * Can't rely on the handled return value to say it was our NMI; two
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002202	 * events could trigger 'simultaneously', raising two back-to-back NMIs.
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002203 *
2204 * If the first NMI handles both, the latter will be empty and daze
2205 * the CPU.
2206 */
Yong Wanga3288102009-06-03 13:12:55 +08002207 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01002208
Peter Zijlstraa4016a72009-05-14 14:52:17 +02002209 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01002210}
2211
Peter Zijlstra63b14642010-01-22 16:32:17 +01002212static struct event_constraint unconstrained;
2213
Peter Zijlstrac91e0f52010-01-22 15:25:59 +01002214static struct event_constraint bts_constraint =
2215 EVENT_CONSTRAINT(0, 1ULL << X86_PMC_IDX_FIXED_BTS, 0);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002216
Peter Zijlstra63b14642010-01-22 16:32:17 +01002217static struct event_constraint *
2218intel_special_constraints(struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002219{
2220 unsigned int hw_event;
2221
2222 hw_event = event->hw.config & INTEL_ARCH_EVENT_MASK;
2223
2224 if (unlikely((hw_event ==
2225 x86_pmu.event_map(PERF_COUNT_HW_BRANCH_INSTRUCTIONS)) &&
2226 (event->hw.sample_period == 1))) {
2227
Peter Zijlstra63b14642010-01-22 16:32:17 +01002228 return &bts_constraint;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002229 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01002230 return NULL;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002231}
2232
Peter Zijlstra63b14642010-01-22 16:32:17 +01002233static struct event_constraint *
2234intel_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002235{
Peter Zijlstra63b14642010-01-22 16:32:17 +01002236 struct event_constraint *c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002237
Peter Zijlstra63b14642010-01-22 16:32:17 +01002238 c = intel_special_constraints(event);
2239 if (c)
2240 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002241
2242 if (x86_pmu.event_constraints) {
2243 for_each_event_constraint(c, x86_pmu.event_constraints) {
Peter Zijlstra63b14642010-01-22 16:32:17 +01002244 if ((event->hw.config & c->cmask) == c->code)
2245 return c;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002246 }
2247 }
Peter Zijlstra63b14642010-01-22 16:32:17 +01002248
2249 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002250}
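/*
 * Note (added for clarity): constraint lookup is a masked compare - an
 * event matches a table entry when (config & cmask) == code - and any
 * event that matches no entry falls back to 'unconstrained', i.e. it may
 * run on any generic counter.
 */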
2251
Peter Zijlstra63b14642010-01-22 16:32:17 +01002252static struct event_constraint *
2253amd_get_event_constraints(struct cpu_hw_events *cpuc, struct perf_event *event)
Stephane Eranian1da53e02010-01-18 10:58:01 +02002254{
Peter Zijlstra63b14642010-01-22 16:32:17 +01002255 return &unconstrained;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002256}
2257
2258static int x86_event_sched_in(struct perf_event *event,
2259 struct perf_cpu_context *cpuctx, int cpu)
2260{
2261 int ret = 0;
2262
2263 event->state = PERF_EVENT_STATE_ACTIVE;
2264 event->oncpu = cpu;
2265 event->tstamp_running += event->ctx->time - event->tstamp_stopped;
2266
2267 if (!is_x86_event(event))
2268 ret = event->pmu->enable(event);
2269
2270 if (!ret && !is_software_event(event))
2271 cpuctx->active_oncpu++;
2272
2273 if (!ret && event->attr.exclusive)
2274 cpuctx->exclusive = 1;
2275
2276 return ret;
2277}
2278
2279static void x86_event_sched_out(struct perf_event *event,
2280 struct perf_cpu_context *cpuctx, int cpu)
2281{
2282 event->state = PERF_EVENT_STATE_INACTIVE;
2283 event->oncpu = -1;
2284
2285 if (!is_x86_event(event))
2286 event->pmu->disable(event);
2287
2288 event->tstamp_running -= event->ctx->time - event->tstamp_stopped;
2289
2290 if (!is_software_event(event))
2291 cpuctx->active_oncpu--;
2292
2293 if (event->attr.exclusive || !cpuctx->active_oncpu)
2294 cpuctx->exclusive = 0;
2295}
2296
2297/*
2298 * Called to enable a whole group of events.
2299 * Returns 1 if the group was enabled, or -EAGAIN if it could not be.
2300 * Assumes the caller has disabled interrupts and has
2301 * frozen the PMU with hw_perf_save_disable.
2302 *
2303 * Called with the PMU disabled. If successful (return value 1), the
2304 * caller is then guaranteed to call perf_enable() and hw_perf_enable().
2305 */
2306int hw_perf_group_sched_in(struct perf_event *leader,
2307 struct perf_cpu_context *cpuctx,
2308 struct perf_event_context *ctx, int cpu)
2309{
2310 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
2311 struct perf_event *sub;
2312 int assign[X86_PMC_IDX_MAX];
2313 int n0, n1, ret;
2314
2315 /* n0 = total number of events */
2316 n0 = collect_events(cpuc, leader, true);
2317 if (n0 < 0)
2318 return n0;
2319
2320 ret = x86_schedule_events(cpuc, n0, assign);
2321 if (ret)
2322 return ret;
2323
2324 ret = x86_event_sched_in(leader, cpuctx, cpu);
2325 if (ret)
2326 return ret;
2327
2328 n1 = 1;
2329 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
Stephane Eranian81130702010-01-21 17:39:01 +02002330 if (sub->state > PERF_EVENT_STATE_OFF) {
Stephane Eranian1da53e02010-01-18 10:58:01 +02002331 ret = x86_event_sched_in(sub, cpuctx, cpu);
2332 if (ret)
2333 goto undo;
2334 ++n1;
2335 }
2336 }
2337 /*
2338 * copy new assignment, now we know it is possible
2339 * will be used by hw_perf_enable()
2340 */
2341 memcpy(cpuc->assign, assign, n0*sizeof(int));
2342
2343 cpuc->n_events = n0;
2344 cpuc->n_added = n1;
2345 ctx->nr_active += n1;
2346
2347 /*
2348 * 1 means successful and events are active
2349 * This is not quite true because we defer
2350 * actual activation until hw_perf_enable() but
2351	 * this way we ensure the caller won't try to enable
2352 * individual events
2353 */
2354 return 1;
2355undo:
2356 x86_event_sched_out(leader, cpuctx, cpu);
2357 n0 = 1;
2358 list_for_each_entry(sub, &leader->sibling_list, group_entry) {
2359 if (sub->state == PERF_EVENT_STATE_ACTIVE) {
2360 x86_event_sched_out(sub, cpuctx, cpu);
2361 if (++n0 == n1)
2362 break;
2363 }
2364 }
2365 return ret;
2366}
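/*
 * Note (added for clarity): group scheduling is transactional - either
 * the leader and all siblings are marked ACTIVE and the assignment is
 * committed, or the 'undo' path schedules out whatever was already
 * activated and the whole group is rejected.
 */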
2367
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002368static __read_mostly struct notifier_block perf_event_nmi_notifier = {
2369 .notifier_call = perf_event_nmi_handler,
Mike Galbraith5b75af02009-02-04 17:11:34 +01002370 .next = NULL,
2371 .priority = 1
Ingo Molnar241771e2008-12-03 10:39:53 +01002372};
2373
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002374static __initconst struct x86_pmu p6_pmu = {
Vince Weaver11d15782009-07-08 17:46:14 -04002375 .name = "p6",
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002376 .handle_irq = x86_pmu_handle_irq,
Vince Weaver11d15782009-07-08 17:46:14 -04002377 .disable_all = p6_pmu_disable_all,
2378 .enable_all = p6_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002379 .enable = p6_pmu_enable_event,
2380 .disable = p6_pmu_disable_event,
Vince Weaver11d15782009-07-08 17:46:14 -04002381 .eventsel = MSR_P6_EVNTSEL0,
2382 .perfctr = MSR_P6_PERFCTR0,
2383 .event_map = p6_pmu_event_map,
2384 .raw_event = p6_pmu_raw_event,
2385 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002386 .apic = 1,
Vince Weaver11d15782009-07-08 17:46:14 -04002387 .max_period = (1ULL << 31) - 1,
2388 .version = 0,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002389 .num_events = 2,
Vince Weaver11d15782009-07-08 17:46:14 -04002390 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002391 * Events have 40 bits implemented. However they are designed such
Vince Weaver11d15782009-07-08 17:46:14 -04002392 * that bits [32-39] are sign extensions of bit 31. As such the
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002393	 * effective width of an event for a P6-like PMU is 32 bits only.
Vince Weaver11d15782009-07-08 17:46:14 -04002394 *
2395	 * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B
2396 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002397 .event_bits = 32,
2398 .event_mask = (1ULL << 32) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002399 .get_event_constraints = intel_get_event_constraints,
2400 .event_constraints = intel_p6_event_constraints
Vince Weaver11d15782009-07-08 17:46:14 -04002401};
2402
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002403static __initconst struct x86_pmu core_pmu = {
2404 .name = "core",
2405 .handle_irq = x86_pmu_handle_irq,
2406 .disable_all = x86_pmu_disable_all,
2407 .enable_all = x86_pmu_enable_all,
2408 .enable = x86_pmu_enable_event,
2409 .disable = x86_pmu_disable_event,
2410 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2411 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
2412 .event_map = intel_pmu_event_map,
2413 .raw_event = intel_pmu_raw_event,
2414 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
2415 .apic = 1,
2416 /*
2417 * Intel PMCs cannot be accessed sanely above 32 bit width,
2418 * so we install an artificial 1<<31 period regardless of
2419 * the generic event period:
2420 */
2421 .max_period = (1ULL << 31) - 1,
2422 .get_event_constraints = intel_get_event_constraints,
2423 .event_constraints = intel_core_event_constraints,
2424};
2425
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002426static __initconst struct x86_pmu intel_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002427 .name = "Intel",
Robert Richter39d81ea2009-04-29 12:47:05 +02002428 .handle_irq = intel_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02002429 .disable_all = intel_pmu_disable_all,
2430 .enable_all = intel_pmu_enable_all,
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002431 .enable = intel_pmu_enable_event,
2432 .disable = intel_pmu_disable_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302433 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
2434 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002435 .event_map = intel_pmu_event_map,
2436 .raw_event = intel_pmu_raw_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302437 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02002438 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002439 /*
2440 * Intel PMCs cannot be accessed sanely above 32 bit width,
2441 * so we install an artificial 1<<31 period regardless of
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002442 * the generic event period:
Robert Richterc619b8f2009-04-29 12:47:23 +02002443 */
2444 .max_period = (1ULL << 31) - 1,
Markus Metzger30dd5682009-07-21 15:56:48 +02002445 .enable_bts = intel_pmu_enable_bts,
2446 .disable_bts = intel_pmu_disable_bts,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002447 .get_event_constraints = intel_get_event_constraints
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302448};
2449
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002450static __initconst struct x86_pmu amd_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02002451 .name = "AMD",
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002452 .handle_irq = x86_pmu_handle_irq,
2453 .disable_all = x86_pmu_disable_all,
2454 .enable_all = x86_pmu_enable_all,
2455 .enable = x86_pmu_enable_event,
2456 .disable = x86_pmu_disable_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302457 .eventsel = MSR_K7_EVNTSEL0,
2458 .perfctr = MSR_K7_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02002459 .event_map = amd_pmu_event_map,
2460 .raw_event = amd_pmu_raw_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302461 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002462 .num_events = 4,
2463 .event_bits = 48,
2464 .event_mask = (1ULL << 48) - 1,
Ingo Molnar04da8a42009-08-11 10:40:08 +02002465 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02002466 /* use highest bit to detect overflow */
2467 .max_period = (1ULL << 47) - 1,
Stephane Eranian1da53e02010-01-18 10:58:01 +02002468 .get_event_constraints = amd_get_event_constraints
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302469};
2470
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002471static __init int p6_pmu_init(void)
Vince Weaver11d15782009-07-08 17:46:14 -04002472{
Vince Weaver11d15782009-07-08 17:46:14 -04002473 switch (boot_cpu_data.x86_model) {
2474 case 1:
2475 case 3: /* Pentium Pro */
2476 case 5:
2477 case 6: /* Pentium II */
2478 case 7:
2479 case 8:
2480 case 11: /* Pentium III */
Vince Weaver11d15782009-07-08 17:46:14 -04002481 case 9:
2482 case 13:
Daniel Qarrasf1c6a582009-07-12 04:32:40 -07002483 /* Pentium M */
2484 break;
Vince Weaver11d15782009-07-08 17:46:14 -04002485 default:
2486 pr_cont("unsupported p6 CPU model %d ",
2487 boot_cpu_data.x86_model);
2488 return -ENODEV;
2489 }
2490
Ingo Molnar04da8a42009-08-11 10:40:08 +02002491 x86_pmu = p6_pmu;
Vince Weaver11d15782009-07-08 17:46:14 -04002492
Vince Weaver11d15782009-07-08 17:46:14 -04002493 return 0;
2494}
2495
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002496static __init int intel_pmu_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01002497{
Ingo Molnar703e9372008-12-17 10:51:15 +01002498 union cpuid10_edx edx;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002499 union cpuid10_eax eax;
2500 unsigned int unused;
2501 unsigned int ebx;
Robert Richterfaa28ae2009-04-29 12:47:13 +02002502 int version;
Ingo Molnar241771e2008-12-03 10:39:53 +01002503
Vince Weaver11d15782009-07-08 17:46:14 -04002504 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
2505 /* check for P6 processor family */
2506 if (boot_cpu_data.x86 == 6) {
2507 return p6_pmu_init();
2508 } else {
Robert Richter72eae042009-04-29 12:47:10 +02002509 return -ENODEV;
Vince Weaver11d15782009-07-08 17:46:14 -04002510 }
2511 }
Robert Richterda1a7762009-04-29 12:46:58 +02002512
Ingo Molnar241771e2008-12-03 10:39:53 +01002513 /*
2514 * Check whether the Architectural PerfMon supports
Ingo Molnardfc65092009-09-21 11:31:35 +02002515 * Branch Misses Retired hw_event or not.
Ingo Molnar241771e2008-12-03 10:39:53 +01002516 */
Ingo Molnar703e9372008-12-17 10:51:15 +01002517 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
Ingo Molnar241771e2008-12-03 10:39:53 +01002518 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
Robert Richter72eae042009-04-29 12:47:10 +02002519 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +01002520
Robert Richterfaa28ae2009-04-29 12:47:13 +02002521 version = eax.split.version_id;
2522 if (version < 2)
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002523 x86_pmu = core_pmu;
2524 else
2525 x86_pmu = intel_pmu;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01002526
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002527 x86_pmu.version = version;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002528 x86_pmu.num_events = eax.split.num_events;
2529 x86_pmu.event_bits = eax.split.bit_width;
2530 x86_pmu.event_mask = (1ULL << eax.split.bit_width) - 1;
Ingo Molnar066d7de2009-05-04 19:04:09 +02002531
2532 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002533 * Quirk: v2 perfmon does not report fixed-purpose events, so
2534 * assume at least 3 events:
Ingo Molnar066d7de2009-05-04 19:04:09 +02002535 */
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002536 if (version > 1)
2537 x86_pmu.num_events_fixed = max((int)edx.split.num_events_fixed, 3);
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302538
Ingo Molnar8326f442009-06-05 20:22:46 +02002539 /*
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002540 * Install the hw-cache-events table:
Ingo Molnar8326f442009-06-05 20:22:46 +02002541 */
2542 switch (boot_cpu_data.x86_model) {
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002543 case 14: /* 65 nm core solo/duo, "Yonah" */
2544 pr_cont("Core events, ");
2545 break;
2546
Yong Wangdc810812009-06-10 17:06:12 +08002547 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
2548 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
2549 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
2550 case 29: /* six-core 45 nm xeon "Dunnington" */
Ingo Molnar8326f442009-06-05 20:22:46 +02002551 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002552 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002553
Peter Zijlstra8c48e442010-01-29 13:25:31 +01002554 x86_pmu.event_constraints = intel_core2_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002555 pr_cont("Core2 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002556 break;
Peter Zijlstra452a3392010-01-27 23:07:48 +01002557
2558 case 26: /* 45 nm nehalem, "Bloomfield" */
2559 case 30: /* 45 nm nehalem, "Lynnfield" */
Ingo Molnar8326f442009-06-05 20:22:46 +02002560 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002561 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002562
Stephane Eranian1da53e02010-01-18 10:58:01 +02002563 x86_pmu.event_constraints = intel_nehalem_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002564 pr_cont("Nehalem/Corei7 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002565 break;
2566 case 28:
2567 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02002568 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02002569
Stephane Eranian1da53e02010-01-18 10:58:01 +02002570 x86_pmu.event_constraints = intel_gen_event_constraints;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002571 pr_cont("Atom events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002572 break;
Peter Zijlstra452a3392010-01-27 23:07:48 +01002573
2574 case 37: /* 32 nm nehalem, "Clarkdale" */
2575 case 44: /* 32 nm nehalem, "Gulftown" */
2576 memcpy(hw_cache_event_ids, westmere_hw_cache_event_ids,
2577 sizeof(hw_cache_event_ids));
2578
2579 x86_pmu.event_constraints = intel_westmere_event_constraints;
2580 pr_cont("Westmere events, ");
2581 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002582 default:
2583 /*
2584 * default constraints for v2 and up
2585 */
2586 x86_pmu.event_constraints = intel_gen_event_constraints;
2587 pr_cont("generic architected perfmon, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02002588 }
Robert Richter72eae042009-04-29 12:47:10 +02002589 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302590}
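/*
 * Illustrative sketch only, not part of this file: the same CPUID
 * leaf 0xA fields that intel_pmu_init() decodes above can be read
 * from a stand-alone user-space program.  The bit positions follow
 * the architectural-perfmon layout used by eax.split/edx.split.
 */
#if 0	/* example only, never compiled into the kernel */
#include <cpuid.h>
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	if (!__get_cpuid(0x0a, &eax, &ebx, &ecx, &edx))
		return 1;	/* no architectural perfmon */

	printf("version id       : %u\n",  eax        & 0xff); /* version_id       */
	printf("generic counters : %u\n", (eax >>  8) & 0xff); /* num_events       */
	printf("counter width    : %u\n", (eax >> 16) & 0xff); /* bit_width        */
	printf("ebx mask length  : %u\n", (eax >> 24) & 0xff); /* mask_length      */
	printf("fixed counters   : %u\n",  edx        & 0x1f); /* num_events_fixed */

	return 0;
}
#endif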
2591
Hiroshi Shimamotodb48ccc2009-11-12 11:25:34 +09002592static __init int amd_pmu_init(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302593{
Jaswinder Singh Rajput4d2be122009-06-11 15:28:09 +05302594 /* Performance-monitoring supported from K7 and later: */
2595 if (boot_cpu_data.x86 < 6)
2596 return -ENODEV;
2597
Robert Richter4a06bd82009-04-29 12:47:11 +02002598 x86_pmu = amd_pmu;
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002599
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +05302600	/* Events are common for all AMD CPUs */
2601 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
2602 sizeof(hw_cache_event_ids));
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02002603
Robert Richter72eae042009-04-29 12:47:10 +02002604 return 0;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302605}
2606
Cyrill Gorcunov12558032009-12-10 19:56:34 +03002607static void __init pmu_check_apic(void)
2608{
2609 if (cpu_has_apic)
2610 return;
2611
2612 x86_pmu.apic = 0;
2613 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
2614 pr_info("no hardware sampling interrupt available.\n");
2615}
2616
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002617void __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302618{
Robert Richter72eae042009-04-29 12:47:10 +02002619 int err;
2620
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002621 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002622
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302623 switch (boot_cpu_data.x86_vendor) {
2624 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02002625 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302626 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302627 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02002628 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05302629 break;
Robert Richter41389602009-04-29 12:47:00 +02002630 default:
2631 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302632 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002633 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002634 pr_cont("no PMU driver, software events only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302635 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002636 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05302637
Cyrill Gorcunov12558032009-12-10 19:56:34 +03002638 pmu_check_apic();
2639
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002640 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02002641
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002642 if (x86_pmu.num_events > X86_PMC_MAX_GENERIC) {
2643 WARN(1, KERN_ERR "hw perf events %d > max(%d), clipping!",
2644 x86_pmu.num_events, X86_PMC_MAX_GENERIC);
2645 x86_pmu.num_events = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01002646 }
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002647 perf_event_mask = (1 << x86_pmu.num_events) - 1;
2648 perf_max_events = x86_pmu.num_events;
Ingo Molnar241771e2008-12-03 10:39:53 +01002649
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002650 if (x86_pmu.num_events_fixed > X86_PMC_MAX_FIXED) {
2651 WARN(1, KERN_ERR "hw perf events fixed %d > max(%d), clipping!",
2652 x86_pmu.num_events_fixed, X86_PMC_MAX_FIXED);
2653 x86_pmu.num_events_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01002654 }
Ingo Molnar241771e2008-12-03 10:39:53 +01002655
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002656 perf_event_mask |=
2657 ((1LL << x86_pmu.num_events_fixed)-1) << X86_PMC_IDX_FIXED;
2658 x86_pmu.intel_ctrl = perf_event_mask;
Ingo Molnar862a1a52008-12-17 13:09:20 +01002659
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002660 perf_events_lapic_init();
2661 register_die_notifier(&perf_event_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02002662
Peter Zijlstra63b14642010-01-22 16:32:17 +01002663 unconstrained = (struct event_constraint)
Peter Zijlstrafce877e2010-01-29 13:25:12 +01002664 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_events) - 1,
2665 0, x86_pmu.num_events);
Peter Zijlstra63b14642010-01-22 16:32:17 +01002666
Ingo Molnar57c0c152009-09-21 12:20:38 +02002667 pr_info("... version: %d\n", x86_pmu.version);
2668 pr_info("... bit width: %d\n", x86_pmu.event_bits);
2669 pr_info("... generic registers: %d\n", x86_pmu.num_events);
2670 pr_info("... value mask: %016Lx\n", x86_pmu.event_mask);
2671 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
2672 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_events_fixed);
2673 pr_info("... event mask: %016Lx\n", perf_event_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01002674}
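/*
 * Worked example, illustrative only (assumes X86_PMC_IDX_FIXED is 32,
 * as defined for x86 in this kernel): with 4 generic and 3 fixed
 * counters the code above computes
 *
 *	generic bits:  (1 << 4) - 1              = 0x000000000f
 *	fixed bits:    ((1LL << 3) - 1) << 32    = 0x0700000000
 *	perf_event_mask                          = 0x070000000f
 *
 * i.e. bits 0-3 address the generic PMCs and bits 32-34 the
 * fixed-purpose counters.
 */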
Ingo Molnar621a01e2008-12-11 12:46:46 +01002675
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002676static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01002677{
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002678 x86_perf_event_update(event, &event->hw, event->hw.idx);
Ingo Molnaree060942008-12-13 09:00:03 +01002679}
2680
Robert Richter4aeb0b42009-04-29 12:47:03 +02002681static const struct pmu pmu = {
2682 .enable = x86_pmu_enable,
2683 .disable = x86_pmu_disable,
Stephane Eraniand76a0812010-02-08 17:06:01 +02002684 .start = x86_pmu_start,
2685 .stop = x86_pmu_stop,
Robert Richter4aeb0b42009-04-29 12:47:03 +02002686 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02002687 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01002688};
2689
Stephane Eranian1da53e02010-01-18 10:58:01 +02002690/*
2691 * validate a single event group
2692 *
2693 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01002694 * - check events are compatible with each other
2695 * - events do not compete for the same counter
2696 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02002697 *
2698 * validation ensures the group can be loaded onto the
2699 * PMU if it was the only group available.
2700 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002701static int validate_group(struct perf_event *event)
2702{
Stephane Eranian1da53e02010-01-18 10:58:01 +02002703 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01002704 struct cpu_hw_events *fake_cpuc;
2705 int ret, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002706
Peter Zijlstra502568d2010-01-22 14:35:46 +01002707 ret = -ENOMEM;
2708 fake_cpuc = kmalloc(sizeof(*fake_cpuc), GFP_KERNEL | __GFP_ZERO);
2709 if (!fake_cpuc)
2710 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002711
Stephane Eranian1da53e02010-01-18 10:58:01 +02002712 /*
2713 * the event is not yet connected with its
2714 * siblings; therefore we must first collect
2715 * existing siblings, then add the new event
2716 * before we can simulate the scheduling
2717 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01002718 ret = -ENOSPC;
2719 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002720 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01002721 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002722
Peter Zijlstra502568d2010-01-22 14:35:46 +01002723 fake_cpuc->n_events = n;
2724 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002725 if (n < 0)
Peter Zijlstra502568d2010-01-22 14:35:46 +01002726 goto out_free;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002727
Peter Zijlstra502568d2010-01-22 14:35:46 +01002728 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002729
Peter Zijlstra502568d2010-01-22 14:35:46 +01002730 ret = x86_schedule_events(fake_cpuc, n, NULL);
2731
2732out_free:
2733 kfree(fake_cpuc);
2734out:
2735 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002736}
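/*
 * Illustrative user-space sketch, not part of this file: creating an
 * event group with perf_event_open() is what eventually exercises
 * validate_group() - the sibling names the leader via group_fd and is
 * only accepted if the whole group can still be scheduled on the PMU.
 */
#if 0	/* example only, never compiled into the kernel */
#include <linux/perf_event.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <string.h>

static int open_hw_counter(unsigned long long config, int group_fd)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size   = sizeof(attr);
	attr.type   = PERF_TYPE_HARDWARE;
	attr.config = config;

	/* pid 0 (current task), any cpu, no flags */
	return syscall(__NR_perf_event_open, &attr, 0, -1, group_fd, 0);
}

/*
 * Usage:
 *	int leader  = open_hw_counter(PERF_COUNT_HW_CPU_CYCLES, -1);
 *	int sibling = open_hw_counter(PERF_COUNT_HW_INSTRUCTIONS, leader);
 */
#endif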
2737
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002738const struct pmu *hw_perf_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01002739{
Stephane Eranian81130702010-01-21 17:39:01 +02002740 const struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002741 int err;
2742
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002743 err = __hw_perf_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002744 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02002745 /*
2746 * we temporarily connect event to its pmu
2747 * such that validate_group() can classify
2748 * it as an x86 event using is_x86_event()
2749 */
2750 tmp = event->pmu;
2751 event->pmu = &pmu;
2752
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002753 if (event->group_leader != event)
2754 err = validate_group(event);
Stephane Eranian81130702010-01-21 17:39:01 +02002755
2756 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002757 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002758 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002759 if (event->destroy)
2760 event->destroy(event);
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02002761 return ERR_PTR(err);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002762 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01002763
Robert Richter4aeb0b42009-04-29 12:47:03 +02002764 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002765}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002766
2767/*
2768 * callchain support
2769 */
2770
2771static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002772void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002773{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002774 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002775 entry->ip[entry->nr++] = ip;
2776}
2777
Tejun Heo245b2e72009-06-24 15:13:48 +09002778static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_irq_entry);
2779static DEFINE_PER_CPU(struct perf_callchain_entry, pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002780
2781
2782static void
2783backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
2784{
2785 /* Ignore warnings */
2786}
2787
2788static void backtrace_warning(void *data, char *msg)
2789{
2790 /* Ignore warnings */
2791}
2792
2793static int backtrace_stack(void *data, char *name)
2794{
Ingo Molnar038e8362009-06-15 09:57:59 +02002795 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002796}
2797
2798static void backtrace_address(void *data, unsigned long addr, int reliable)
2799{
2800 struct perf_callchain_entry *entry = data;
2801
2802 if (reliable)
2803 callchain_store(entry, addr);
2804}
2805
2806static const struct stacktrace_ops backtrace_ops = {
2807 .warning = backtrace_warning,
2808 .warning_symbol = backtrace_warning_symbol,
2809 .stack = backtrace_stack,
2810 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01002811 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002812};
2813
Ingo Molnar038e8362009-06-15 09:57:59 +02002814#include "../dumpstack.h"
2815
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002816static void
2817perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
2818{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002819 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02002820 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002821
Frederic Weisbecker48b5ba92009-12-31 05:53:02 +01002822 dump_trace(NULL, regs, NULL, regs->bp, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002823}
2824
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002825/*
2826 * best effort, GUP based copy_from_user() that assumes IRQ or NMI context
2827 */
2828static unsigned long
2829copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002830{
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002831 unsigned long offset, addr = (unsigned long)from;
2832 int type = in_nmi() ? KM_NMI : KM_IRQ0;
2833 unsigned long size, len = 0;
2834 struct page *page;
2835 void *map;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002836 int ret;
2837
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002838 do {
2839 ret = __get_user_pages_fast(addr, 1, 0, &page);
2840 if (!ret)
2841 break;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002842
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002843 offset = addr & (PAGE_SIZE - 1);
2844 size = min(PAGE_SIZE - offset, n - len);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002845
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002846 map = kmap_atomic(page, type);
2847 memcpy(to, map+offset, size);
2848 kunmap_atomic(map, type);
2849 put_page(page);
2850
2851 len += size;
2852 to += size;
2853 addr += size;
2854
2855 } while (len < n);
2856
2857 return len;
2858}
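/*
 * Worked example, illustrative only: with PAGE_SIZE 4096, copying
 * n = 100 bytes starting 30 bytes before a page boundary takes two
 * passes of the loop above: first offset = 4066 and
 * size = min(30, 100) = 30, then offset = 0 and
 * size = min(4096, 70) = 70, after which len == n and the loop ends.
 */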
2859
2860static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
2861{
2862 unsigned long bytes;
2863
2864 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
2865
2866 return bytes == sizeof(*frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002867}
2868
2869static void
2870perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
2871{
2872 struct stack_frame frame;
2873 const void __user *fp;
2874
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002875 if (!user_mode(regs))
2876 regs = task_pt_regs(current);
2877
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002878 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002879
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002880 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002881 callchain_store(entry, regs->ip);
2882
Peter Zijlstraf9188e02009-06-18 22:20:52 +02002883 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Ingo Molnar038e8362009-06-15 09:57:59 +02002884 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002885 frame.return_address = 0;
2886
2887 if (!copy_stack_frame(fp, &frame))
2888 break;
2889
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002890 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002891 break;
2892
2893 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02002894 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002895 }
2896}
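/*
 * Illustrative note: struct stack_frame is defined outside this file;
 * judging from its use above it follows the usual frame-pointer
 * layout, roughly
 *
 *	struct stack_frame {
 *		struct stack_frame *next_frame;    saved frame pointer
 *		unsigned long return_address;
 *	};
 *
 * Each iteration above records frame.return_address and follows
 * frame.next_frame up the user stack, stopping when a frame can no
 * longer be copied or would fall below regs->sp.
 */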
2897
2898static void
2899perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
2900{
2901 int is_user;
2902
2903 if (!regs)
2904 return;
2905
2906 is_user = user_mode(regs);
2907
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002908 if (is_user && current->state != TASK_RUNNING)
2909 return;
2910
2911 if (!is_user)
2912 perf_callchain_kernel(regs, entry);
2913
2914 if (current->mm)
2915 perf_callchain_user(regs, entry);
2916}
2917
2918struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
2919{
2920 struct perf_callchain_entry *entry;
2921
2922 if (in_nmi())
Tejun Heo245b2e72009-06-24 15:13:48 +09002923 entry = &__get_cpu_var(pmc_nmi_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002924 else
Tejun Heo245b2e72009-06-24 15:13:48 +09002925 entry = &__get_cpu_var(pmc_irq_entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002926
2927 entry->nr = 0;
2928
2929 perf_do_callchain(regs, entry);
2930
2931 return entry;
2932}
Markus Metzger30dd5682009-07-21 15:56:48 +02002933
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002934void hw_perf_event_setup_online(int cpu)
Markus Metzger30dd5682009-07-21 15:56:48 +02002935{
2936 init_debug_store_on_cpu(cpu);
2937}