/*
 * Performance counter x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra <pzijlstr@redhat.com>
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/highmem.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter *counters[X86_PMC_IDX_MAX];
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long active_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long interrupts;
	int enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
struct x86_pmu {
	const char *name;
	int version;
	int (*handle_irq)(struct pt_regs *);
	void (*disable_all)(void);
	void (*enable_all)(void);
	void (*enable)(struct hw_perf_counter *, int);
	void (*disable)(struct hw_perf_counter *, int);
	unsigned eventsel;
	unsigned perfctr;
	u64 (*event_map)(int);
	u64 (*raw_event)(u64);
	int max_events;
	int num_counters;
	int num_counters_fixed;
	int counter_bits;
	u64 counter_mask;
	int apic;
	u64 max_period;
	u64 intel_ctrl;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Not sure about some of these
 */
static const u64 p6_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES] = 0x0079,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0f2e,
	[PERF_COUNT_HW_CACHE_MISSES] = 0x012e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES] = 0x0062,
};

static u64 p6_pmu_event_map(int event)
{
	return p6_perfmon_event_map[event];
}

/*
 * Counter setting that is specified not to count anything.
 * We use this to effectively disable a counter.
 *
 * L2_RQSTS with 0 MESI unit mask.
 */
#define P6_NOP_COUNTER 0x0000002EULL

static u64 p6_pmu_raw_event(u64 event)
{
#define P6_EVNTSEL_EVENT_MASK 0x000000FFULL
#define P6_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define P6_EVNTSEL_EDGE_MASK 0x00040000ULL
#define P6_EVNTSEL_INV_MASK 0x00800000ULL
#define P6_EVNTSEL_COUNTER_MASK 0xFF000000ULL

#define P6_EVNTSEL_MASK \
	(P6_EVNTSEL_EVENT_MASK | \
	 P6_EVNTSEL_UNIT_MASK | \
	 P6_EVNTSEL_EDGE_MASK | \
	 P6_EVNTSEL_INV_MASK | \
	 P6_EVNTSEL_COUNTER_MASK)

	return event & P6_EVNTSEL_MASK;
}


/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES] = 0x003c,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES] = 0x4f2e,
	[PERF_COUNT_HW_CACHE_MISSES] = 0x412e,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
	[PERF_COUNT_HW_BUS_CYCLES] = 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

/*
 * Generalized hw caching related event table, filled
 * in on a per model basis. A value of 0 means
 * 'not supported', -1 means 'event makes no sense on
 * this CPU', any other value means the raw event
 * ID.
 */

#define C(x) PERF_COUNT_HW_CACHE_##x

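/*
 * The attr->config value for these generalized cache events packs the
 * cache type, the operation and the result into one word (decoded by
 * set_ext_hw_attr() below):
 *
 *	config = (type << 0) | (op << 8) | (result << 16)
 *
 * e.g. C(L1D) | (C(OP_READ) << 8) | (C(RESULT_MISS) << 16) selects the
 * L1D read-miss entry of the per-model tables that follow.
 */
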
static u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

static const u64 nehalem_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0x024e, /* L1D_PREFETCH.MISS */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0324, /* L2_RQSTS.LOADS */
		[ C(RESULT_MISS) ] = 0x0224, /* L2_RQSTS.LD_MISS */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0c24, /* L2_RQSTS.RFOS */
		[ C(RESULT_MISS) ] = 0x0824, /* L2_RQSTS.RFO_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2e, /* LLC Reference */
		[ C(RESULT_MISS) ] = 0x412e, /* LLC Misses */
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0108, /* DTLB_LOAD_MISSES.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x010c, /* MEM_STORE_RETIRED.DTLB_MISS */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0x0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x01c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x20c8, /* ITLB_MISS_RETIRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ALL_BRANCHES */
		[ C(RESULT_MISS) ] = 0x03e8, /* BPU_CLEARS.ANY */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static const u64 core2_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI */
		[ C(RESULT_MISS) ] = 0x0140, /* L1D_CACHE_LD.I_STATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI */
		[ C(RESULT_MISS) ] = 0x0141, /* L1D_CACHE_ST.I_STATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x104e, /* L1D_PREFETCH.REQUESTS */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0081, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f40, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0208, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0f41, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0808, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x1282, /* ITLBMISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static const u64 atom_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE.LD */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE.ST */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0380, /* L1I.READS */
		[ C(RESULT_MISS) ] = 0x0280, /* L1I.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f29, /* L2_LD.MESI */
		[ C(RESULT_MISS) ] = 0x4129, /* L2_LD.ISTATE */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x4f2A, /* L2_ST.MESI */
		[ C(RESULT_MISS) ] = 0x412A, /* L2_ST.ISTATE */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x2140, /* L1D_CACHE_LD.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0508, /* DTLB_MISSES.MISS_LD */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x2240, /* L1D_CACHE_ST.MESI (alias) */
		[ C(RESULT_MISS) ] = 0x0608, /* DTLB_MISSES.MISS_ST */
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c0, /* INST_RETIRED.ANY_P */
		[ C(RESULT_MISS) ] = 0x0282, /* ITLB.MISSES */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c4, /* BR_INST_RETIRED.ANY */
		[ C(RESULT_MISS) ] = 0x00c5, /* BP_INST_RETIRED.MISPRED */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK 0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK 0x0000FF00ULL
#define CORE_EVNTSEL_EDGE_MASK 0x00040000ULL
#define CORE_EVNTSEL_INV_MASK 0x00800000ULL
#define CORE_EVNTSEL_COUNTER_MASK 0xFF000000ULL

#define CORE_EVNTSEL_MASK \
	(CORE_EVNTSEL_EVENT_MASK | \
	 CORE_EVNTSEL_UNIT_MASK | \
	 CORE_EVNTSEL_EDGE_MASK | \
	 CORE_EVNTSEL_INV_MASK | \
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}
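
/*
 * For instance, a raw attr->config of 0x412e (event select 0x2e, unit mask
 * 0x41 - LLC misses) passes through the mask above unchanged, while bits
 * such as USR/OS/INT are filtered out here and are instead set by
 * __hw_perf_counter_init() from the exclude_* attributes.
 */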

static const u64 amd_hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX] =
{
 [ C(L1D) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0041, /* Data Cache Misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x0142, /* Data Cache Refills :system */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x0267, /* Data Prefetcher :attempts */
		[ C(RESULT_MISS) ] = 0x0167, /* Data Prefetcher :cancelled */
	},
 },
 [ C(L1I ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction cache fetches */
		[ C(RESULT_MISS) ] = 0x0081, /* Instruction cache misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0x014B, /* Prefetch Instructions :Load */
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(LL ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x037D, /* Requests to L2 Cache :IC+DC */
		[ C(RESULT_MISS) ] = 0x037E, /* L2 Cache Misses : IC+DC */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0x017F, /* L2 Fill/Writeback */
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(DTLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0040, /* Data Cache Accesses */
		[ C(RESULT_MISS) ] = 0x0046, /* L1 DTLB and L2 DTLB Miss */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = 0,
		[ C(RESULT_MISS) ] = 0,
	},
 },
 [ C(ITLB) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x0080, /* Instruction fetches */
		[ C(RESULT_MISS) ] = 0x0085, /* Instr. fetch ITLB misses */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
 [ C(BPU ) ] = {
	[ C(OP_READ) ] = {
		[ C(RESULT_ACCESS) ] = 0x00c2, /* Retired Branch Instr. */
		[ C(RESULT_MISS) ] = 0x00c3, /* Retired Mispredicted BI */
	},
	[ C(OP_WRITE) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
	[ C(OP_PREFETCH) ] = {
		[ C(RESULT_ACCESS) ] = -1,
		[ C(RESULT_MISS) ] = -1,
	},
 },
};

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
	[PERF_COUNT_HW_CPU_CYCLES] = 0x0076,
	[PERF_COUNT_HW_INSTRUCTIONS] = 0x00c0,
	[PERF_COUNT_HW_CACHE_REFERENCES] = 0x0080,
	[PERF_COUNT_HW_CACHE_MISSES] = 0x0081,
	[PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = 0x00c4,
	[PERF_COUNT_HW_BRANCH_MISSES] = 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK 0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK 0x00000FF00ULL
#define K7_EVNTSEL_EDGE_MASK 0x000040000ULL
#define K7_EVNTSEL_INV_MASK 0x000800000ULL
#define K7_EVNTSEL_COUNTER_MASK 0x0FF000000ULL

#define K7_EVNTSEL_MASK \
	(K7_EVNTSEL_EVENT_MASK | \
	 K7_EVNTSEL_UNIT_MASK | \
	 K7_EVNTSEL_EDGE_MASK | \
	 K7_EVNTSEL_INV_MASK | \
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	int shift = 64 - x86_pmu.counter_bits;
	u64 prev_raw_count, new_raw_count;
	s64 delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}
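
/*
 * Worked example of the shift trick above, assuming 40-bit counters
 * (shift = 24): if prev_raw_count is 0xfffffffff0 and the counter has
 * wrapped around to a new_raw_count of 0x0f, then
 * (new << 24) - (prev << 24) = 0x1f000000 and the arithmetic shift right
 * yields delta = 0x1f, i.e. 31 events counted across the wrap instead of
 * a huge negative delta.
 */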

static atomic_t active_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}
#endif

	return true;

#ifdef CONFIG_X86_LOCAL_APIC
eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
#endif
}

static void release_pmc_hardware(void)
{
#ifdef CONFIG_X86_LOCAL_APIC
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
#endif
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&active_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_counter *hwc, struct perf_counter_attr *attr)
{
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;

	return 0;
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_attr *attr = &counter->attr;
	struct hw_perf_counter *hwc = &counter->hw;
	u64 config;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&active_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&active_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&active_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!attr->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!attr->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	if (!hwc->sample_period) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		atomic64_set(&hwc->period_left, hwc->sample_period);
	} else {
		/*
		 * If we have a PMU initialized but no APIC
		 * interrupts, we cannot sample hardware
		 * counters (user-space has to fall back and
		 * sample via a hrtimer based software counter):
		 */
		if (!x86_pmu.apic)
			return -EOPNOTSUPP;
	}

	counter->destroy = hw_perf_counter_destroy;

	/*
	 * Raw event type provides the config in the event structure
	 */
	if (attr->type == PERF_TYPE_RAW) {
		hwc->config |= x86_pmu.raw_event(attr->config);
		return 0;
	}

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, attr);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	hwc->config |= config;

	return 0;
}

static void p6_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_disable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);
}

static void amd_pmu_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (!cpuc->enabled)
		return;

	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_disable(void)
{
	if (!x86_pmu_initialized())
		return;
	return x86_pmu.disable_all();
}

static void p6_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	unsigned long val;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	/* p6 only has one enable register */
	rdmsrl(MSR_P6_EVNTSEL0, val);
	val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
	wrmsrl(MSR_P6_EVNTSEL0, val);
}

static void intel_pmu_enable_all(void)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, x86_pmu.intel_ctrl);
}

static void amd_pmu_enable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	if (cpuc->enabled)
		return;

	cpuc->enabled = 1;
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct perf_counter *counter = cpuc->counters[idx];
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		val = counter->hw.config;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_enable(void)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.enable_all();
}

static inline u64 intel_pmu_get_status(void)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	(void)checking_wrmsrl(hwc->config_base + idx, hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	(void)checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
p6_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val = P6_NOP_COUNTER;

	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
static int
x86_perf_counter_set_period(struct perf_counter *counter,
			    struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int err, ret = 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			(u64)(-left) & x86_pmu.counter_mask);

	perf_counter_update_userpage(counter);

	return ret;
}
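
/*
 * Example: with a sample_period of 100000 the counter above is programmed
 * to (-100000) & counter_mask, so it counts up and overflows (raising the
 * PMI) after 100000 further events; prev_count is set to the same start
 * value so that x86_perf_counter_update() later sees a delta of 100000.
 */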

static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void p6_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	u64 val;

	val = hwc->config;
	if (cpuc->enabled)
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;

	(void)checking_wrmsrl(hwc->config_base + idx, val);
}


static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
}

static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (!x86_pmu.num_counters_fixed)
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_HW_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used_mask))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used_mask)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used_mask,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used_mask);
			hwc->idx = idx;
		}
		hwc->config_base = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init();

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active_mask);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	perf_counter_update_userpage(counter);

	return 0;
}
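
/*
 * Scheduling example: a PERF_COUNT_HW_CPU_CYCLES counter first tries the
 * corresponding fixed-purpose counter (fixed_mode_idx() returns
 * X86_PMC_IDX_FIXED_CPU_CYCLES); if that bit is already set in used_mask,
 * the try_generic path above falls back to the first free generic counter.
 */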

static void x86_pmu_unthrottle(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;

	if (WARN_ON_ONCE(hwc->idx >= X86_PMC_IDX_MAX ||
				cpuc->counters[hwc->idx] != counter))
		return;

	x86_pmu.enable(hwc, hwc->idx);
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	unsigned long flags;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status: %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used: %016llx\n", cpu, *(u64 *)cpuc->used_mask);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_restore(flags);
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active_mask);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used_mask);

	perf_counter_update_userpage(counter);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static int intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;
	int ret;

	x86_perf_counter_update(counter, hwc, idx);
	ret = x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);

	return ret;
}

static void intel_pmu_reset(void)
{
	unsigned long flags;
	int idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_save(flags);

	printk("clearing PMU state on CPU#%d\n", smp_processor_id());

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		checking_wrmsrl(x86_pmu.eventsel + idx, 0ull);
		checking_wrmsrl(x86_pmu.perfctr + idx, 0ull);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		checking_wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, 0ull);
	}

	local_irq_restore(flags);
}

static int p6_pmu_handle_irq(struct pt_regs *regs)
{
	struct perf_sample_data data;
	struct cpu_hw_counters *cpuc;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	int idx, handled = 0;
	u64 val;

	data.regs = regs;
	data.addr = 0;

	cpuc = &__get_cpu_var(cpu_hw_counters);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active_mask))
			continue;

		counter = cpuc->counters[idx];
		hwc = &counter->hw;

		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;

		/*
		 * counter overflow
		 */
		handled = 1;
		data.period = counter->hw.last_period;

		if (!x86_perf_counter_set_period(counter, hwc, idx))
			continue;

		if (perf_counter_overflow(counter, 1, &data))
			p6_pmu_disable_counter(hwc, idx);
	}

	if (handled)
		inc_irq_stat(apic_perf_irqs);

	return handled;
}
Ingo Molnaraaba9802009-05-26 08:10:00 +02001347
Ingo Molnar241771e2008-12-03 10:39:53 +01001348/*
1349 * This handler is triggered by the local APIC, so the APIC IRQ handling
1350 * rules apply:
1351 */
Yong Wanga3288102009-06-03 13:12:55 +08001352static int intel_pmu_handle_irq(struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001353{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001354 struct perf_sample_data data;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001355 struct cpu_hw_counters *cpuc;
Vince Weaver11d15782009-07-08 17:46:14 -04001356 int bit, loops;
Mike Galbraith4b39fd92009-01-23 14:36:16 +01001357 u64 ack, status;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001358
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001359 data.regs = regs;
1360 data.addr = 0;
1361
Vince Weaver11d15782009-07-08 17:46:14 -04001362 cpuc = &__get_cpu_var(cpu_hw_counters);
Ingo Molnar43874d22008-12-09 12:23:59 +01001363
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001364 perf_disable();
Robert Richter19d84da2009-04-29 12:47:25 +02001365 status = intel_pmu_get_status();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001366 if (!status) {
1367 perf_enable();
1368 return 0;
1369 }
Ingo Molnar87b9cf42008-12-08 14:20:16 +01001370
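	/*
	 * New overflows can be signalled in the global overflow status word
	 * while we process the current batch, so re-read the status and loop
	 * until it reads zero.  Bail out after 100 iterations so a stuck PMU
	 * cannot wedge the CPU in an endless NMI loop.
	 */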
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001371 loops = 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001372again:
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001373 if (++loops > 100) {
1374 WARN_ONCE(1, "perfcounters: irq loop stuck!\n");
Ingo Molnar34adc802009-05-20 20:13:28 +02001375 perf_counter_print_debug();
Ingo Molnaraaba9802009-05-26 08:10:00 +02001376 intel_pmu_reset();
1377 perf_enable();
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001378 return 1;
1379 }
1380
Mike Galbraithd278c482009-02-09 07:38:50 +01001381 inc_irq_stat(apic_perf_irqs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001382 ack = status;
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001383 for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
Ingo Molnar862a1a52008-12-17 13:09:20 +01001384 struct perf_counter *counter = cpuc->counters[bit];
Ingo Molnar241771e2008-12-03 10:39:53 +01001385
1386 clear_bit(bit, (unsigned long *) &status);
Robert Richter43f62012009-04-29 16:55:56 +02001387 if (!test_bit(bit, cpuc->active_mask))
Ingo Molnar241771e2008-12-03 10:39:53 +01001388 continue;
1389
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001390 if (!intel_pmu_save_and_restart(counter))
1391 continue;
1392
Peter Zijlstra60f916d2009-06-15 19:00:20 +02001393 data.period = counter->hw.last_period;
1394
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001395 if (perf_counter_overflow(counter, 1, &data))
Robert Richterd4369892009-04-29 12:47:19 +02001396 intel_pmu_disable_counter(&counter->hw, bit);
Ingo Molnar241771e2008-12-03 10:39:53 +01001397 }
1398
Robert Richterdee5d902009-04-29 12:47:07 +02001399 intel_pmu_ack_status(ack);
Ingo Molnar241771e2008-12-03 10:39:53 +01001400
1401 /*
1402 * Repeat if there is more work to be done:
1403 */
Robert Richter19d84da2009-04-29 12:47:25 +02001404 status = intel_pmu_get_status();
Ingo Molnar241771e2008-12-03 10:39:53 +01001405 if (status)
1406 goto again;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001407
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001408 perf_enable();
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001409
1410 return 1;
Mike Galbraith1b023a92009-01-23 10:13:01 +01001411}
1412
Yong Wanga3288102009-06-03 13:12:55 +08001413static int amd_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02001414{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001415 struct perf_sample_data data;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001416 struct cpu_hw_counters *cpuc;
Robert Richtera29aa8a2009-04-29 12:47:21 +02001417 struct perf_counter *counter;
1418 struct hw_perf_counter *hwc;
Vince Weaver11d15782009-07-08 17:46:14 -04001419 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001420 u64 val;
1421
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001422 data.regs = regs;
1423 data.addr = 0;
1424
Vince Weaver11d15782009-07-08 17:46:14 -04001425 cpuc = &__get_cpu_var(cpu_hw_counters);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001426
Robert Richtera29aa8a2009-04-29 12:47:21 +02001427 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Robert Richter43f62012009-04-29 16:55:56 +02001428 if (!test_bit(idx, cpuc->active_mask))
Robert Richtera29aa8a2009-04-29 12:47:21 +02001429 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001430
Robert Richtera29aa8a2009-04-29 12:47:21 +02001431 counter = cpuc->counters[idx];
1432 hwc = &counter->hw;
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001433
Robert Richter4b7bfd02009-04-29 12:47:22 +02001434 val = x86_perf_counter_update(counter, hwc, idx);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001435 if (val & (1ULL << (x86_pmu.counter_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001436 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001437
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001438 /*
1439 * counter overflow
1440 */
1441 handled = 1;
1442 data.period = counter->hw.last_period;
1443
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001444 if (!x86_perf_counter_set_period(counter, hwc, idx))
1445 continue;
1446
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001447 if (perf_counter_overflow(counter, 1, &data))
Robert Richtera29aa8a2009-04-29 12:47:21 +02001448 amd_pmu_disable_counter(hwc, idx);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001449 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001450
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001451 if (handled)
1452 inc_irq_stat(apic_perf_irqs);
1453
Robert Richtera29aa8a2009-04-29 12:47:21 +02001454 return handled;
1455}
Robert Richter39d81ea2009-04-29 12:47:05 +02001456
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001457void smp_perf_pending_interrupt(struct pt_regs *regs)
1458{
1459 irq_enter();
1460 ack_APIC_irq();
1461 inc_irq_stat(apic_pending_irqs);
1462 perf_counter_do_pending();
1463 irq_exit();
1464}
1465
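/*
 * Kick the pending-work vector on the local CPU via a self-IPI:
 * smp_perf_pending_interrupt() above then runs perf_counter_do_pending()
 * from normal interrupt context, outside the NMI handler.
 */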
1466void set_perf_counter_pending(void)
1467{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001468#ifdef CONFIG_X86_LOCAL_APIC
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001469 apic->send_IPI_self(LOCAL_PENDING_VECTOR);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001470#endif
Peter Zijlstrab6276f32009-04-06 11:45:03 +02001471}
1472
Yong Wangc323d952009-05-29 13:28:35 +08001473void perf_counters_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001474{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001475#ifdef CONFIG_X86_LOCAL_APIC
1476 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001477 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001478
Ingo Molnar241771e2008-12-03 10:39:53 +01001479 /*
Yong Wangc323d952009-05-29 13:28:35 +08001480 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001481 */
Yong Wangc323d952009-05-29 13:28:35 +08001482 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001483#endif
Ingo Molnar241771e2008-12-03 10:39:53 +01001484}
1485
1486static int __kprobes
1487perf_counter_nmi_handler(struct notifier_block *self,
1488 unsigned long cmd, void *__args)
1489{
1490 struct die_args *args = __args;
1491 struct pt_regs *regs;
1492
Peter Zijlstraba778132009-05-04 18:47:44 +02001493 if (!atomic_read(&active_counters))
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001494 return NOTIFY_DONE;
1495
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001496 switch (cmd) {
1497 case DIE_NMI:
1498 case DIE_NMI_IPI:
1499 break;
1500
1501 default:
Ingo Molnar241771e2008-12-03 10:39:53 +01001502 return NOTIFY_DONE;
Peter Zijlstrab0f3f282009-03-05 18:08:27 +01001503 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001504
1505 regs = args->regs;
1506
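	/*
	 * Re-write the LVT performance counter entry: on some CPUs it gets
	 * masked when the counter interrupt is delivered, so it has to be
	 * re-armed before the next PMI can be taken.
	 */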
Ingo Molnar04da8a42009-08-11 10:40:08 +02001507#ifdef CONFIG_X86_LOCAL_APIC
Ingo Molnar241771e2008-12-03 10:39:53 +01001508 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar04da8a42009-08-11 10:40:08 +02001509#endif
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001510 /*
 1511 * We can't rely on the handled return value to say whether it was our
 1512 * NMI: two counters could trigger 'simultaneously', raising two back-to-back NMIs.
 1513 *
 1514 * If the first NMI handles both, the latter will find nothing to handle
 1515 * and will daze the CPU.
1516 */
Yong Wanga3288102009-06-03 13:12:55 +08001517 x86_pmu.handle_irq(regs);
Ingo Molnar241771e2008-12-03 10:39:53 +01001518
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001519 return NOTIFY_STOP;
Ingo Molnar241771e2008-12-03 10:39:53 +01001520}
1521
1522static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
Mike Galbraith5b75af02009-02-04 17:11:34 +01001523 .notifier_call = perf_counter_nmi_handler,
1524 .next = NULL,
1525 .priority = 1
Ingo Molnar241771e2008-12-03 10:39:53 +01001526};
1527
Vince Weaver11d15782009-07-08 17:46:14 -04001528static struct x86_pmu p6_pmu = {
1529 .name = "p6",
1530 .handle_irq = p6_pmu_handle_irq,
1531 .disable_all = p6_pmu_disable_all,
1532 .enable_all = p6_pmu_enable_all,
1533 .enable = p6_pmu_enable_counter,
1534 .disable = p6_pmu_disable_counter,
1535 .eventsel = MSR_P6_EVNTSEL0,
1536 .perfctr = MSR_P6_PERFCTR0,
1537 .event_map = p6_pmu_event_map,
1538 .raw_event = p6_pmu_raw_event,
1539 .max_events = ARRAY_SIZE(p6_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02001540 .apic = 1,
Vince Weaver11d15782009-07-08 17:46:14 -04001541 .max_period = (1ULL << 31) - 1,
1542 .version = 0,
1543 .num_counters = 2,
1544 /*
 1545 * Counters have 40 bits implemented. However, they are designed such
 1546 * that bits [32-39] are sign extensions of bit 31. As such, the
 1547 * effective width of a counter for a P6-like PMU is only 32 bits.
 1548 *
 1549 * See the IA-32 Intel Architecture Software Developer's Manual, Vol 3B.
1550 */
1551 .counter_bits = 32,
1552 .counter_mask = (1ULL << 32) - 1,
1553};
1554
Robert Richter5f4ec282009-04-29 12:47:04 +02001555static struct x86_pmu intel_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02001556 .name = "Intel",
Robert Richter39d81ea2009-04-29 12:47:05 +02001557 .handle_irq = intel_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001558 .disable_all = intel_pmu_disable_all,
1559 .enable_all = intel_pmu_enable_all,
Robert Richter5f4ec282009-04-29 12:47:04 +02001560 .enable = intel_pmu_enable_counter,
1561 .disable = intel_pmu_disable_counter,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301562 .eventsel = MSR_ARCH_PERFMON_EVENTSEL0,
1563 .perfctr = MSR_ARCH_PERFMON_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02001564 .event_map = intel_pmu_event_map,
1565 .raw_event = intel_pmu_raw_event,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301566 .max_events = ARRAY_SIZE(intel_perfmon_event_map),
Ingo Molnar04da8a42009-08-11 10:40:08 +02001567 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02001568 /*
 1569 * Intel PMCs cannot be accessed sanely above 32-bit width,
1570 * so we install an artificial 1<<31 period regardless of
1571 * the generic counter period:
1572 */
1573 .max_period = (1ULL << 31) - 1,
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301574};
1575
Robert Richter5f4ec282009-04-29 12:47:04 +02001576static struct x86_pmu amd_pmu = {
Robert Richterfaa28ae2009-04-29 12:47:13 +02001577 .name = "AMD",
Robert Richter39d81ea2009-04-29 12:47:05 +02001578 .handle_irq = amd_pmu_handle_irq,
Peter Zijlstra9e35ad32009-05-13 16:21:38 +02001579 .disable_all = amd_pmu_disable_all,
1580 .enable_all = amd_pmu_enable_all,
Robert Richter5f4ec282009-04-29 12:47:04 +02001581 .enable = amd_pmu_enable_counter,
1582 .disable = amd_pmu_disable_counter,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301583 .eventsel = MSR_K7_EVNTSEL0,
1584 .perfctr = MSR_K7_PERFCTR0,
Robert Richter5f4ec282009-04-29 12:47:04 +02001585 .event_map = amd_pmu_event_map,
1586 .raw_event = amd_pmu_raw_event,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301587 .max_events = ARRAY_SIZE(amd_perfmon_event_map),
Robert Richter0933e5c2009-04-29 12:47:12 +02001588 .num_counters = 4,
1589 .counter_bits = 48,
1590 .counter_mask = (1ULL << 48) - 1,
Ingo Molnar04da8a42009-08-11 10:40:08 +02001591 .apic = 1,
Robert Richterc619b8f2009-04-29 12:47:23 +02001592 /* use highest bit to detect overflow */
1593 .max_period = (1ULL << 47) - 1,
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301594};
1595
Vince Weaver11d15782009-07-08 17:46:14 -04001596static int p6_pmu_init(void)
1597{
Vince Weaver11d15782009-07-08 17:46:14 -04001598 switch (boot_cpu_data.x86_model) {
1599 case 1:
1600 case 3: /* Pentium Pro */
1601 case 5:
1602 case 6: /* Pentium II */
1603 case 7:
1604 case 8:
1605 case 11: /* Pentium III */
1606 break;
1607 case 9:
1608 case 13:
Daniel Qarrasf1c6a582009-07-12 04:32:40 -07001609 /* Pentium M */
1610 break;
Vince Weaver11d15782009-07-08 17:46:14 -04001611 default:
1612 pr_cont("unsupported p6 CPU model %d ",
1613 boot_cpu_data.x86_model);
1614 return -ENODEV;
1615 }
1616
Ingo Molnar04da8a42009-08-11 10:40:08 +02001617 x86_pmu = p6_pmu;
1618
Vince Weaver11d15782009-07-08 17:46:14 -04001619 if (!cpu_has_apic) {
Ingo Molnar3c581a72009-08-11 10:47:36 +02001620 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
Ingo Molnar04da8a42009-08-11 10:40:08 +02001621 pr_info("no hardware sampling interrupt available.\n");
1622 x86_pmu.apic = 0;
Vince Weaver11d15782009-07-08 17:46:14 -04001623 }
1624
Vince Weaver11d15782009-07-08 17:46:14 -04001625 return 0;
1626}
1627
Robert Richter72eae042009-04-29 12:47:10 +02001628static int intel_pmu_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001629{
Ingo Molnar703e9372008-12-17 10:51:15 +01001630 union cpuid10_edx edx;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01001631 union cpuid10_eax eax;
1632 unsigned int unused;
1633 unsigned int ebx;
Robert Richterfaa28ae2009-04-29 12:47:13 +02001634 int version;
Ingo Molnar241771e2008-12-03 10:39:53 +01001635
Vince Weaver11d15782009-07-08 17:46:14 -04001636 if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON)) {
1637 /* check for P6 processor family */
1638 if (boot_cpu_data.x86 == 6) {
1639 return p6_pmu_init();
1640 } else {
Robert Richter72eae042009-04-29 12:47:10 +02001641 return -ENODEV;
Vince Weaver11d15782009-07-08 17:46:14 -04001642 }
1643 }
Robert Richterda1a7762009-04-29 12:46:58 +02001644
Ingo Molnar241771e2008-12-03 10:39:53 +01001645 /*
 1646 * Check whether the Architectural PerfMon supports
 1647 * the Branch Misses Retired event or not.
1648 */
Ingo Molnar703e9372008-12-17 10:51:15 +01001649 cpuid(10, &eax.full, &ebx, &unused, &edx.full);
Ingo Molnar241771e2008-12-03 10:39:53 +01001650 if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
Robert Richter72eae042009-04-29 12:47:10 +02001651 return -ENODEV;
Ingo Molnar241771e2008-12-03 10:39:53 +01001652
Robert Richterfaa28ae2009-04-29 12:47:13 +02001653 version = eax.split.version_id;
1654 if (version < 2)
Robert Richter72eae042009-04-29 12:47:10 +02001655 return -ENODEV;
Ingo Molnar7bb497b2009-03-18 08:59:21 +01001656
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001657 x86_pmu = intel_pmu;
1658 x86_pmu.version = version;
1659 x86_pmu.num_counters = eax.split.num_counters;
1660 x86_pmu.counter_bits = eax.split.bit_width;
1661 x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;
Ingo Molnar066d7de2009-05-04 19:04:09 +02001662
1663 /*
1664 * Quirk: v2 perfmon does not report fixed-purpose counters, so
1665 * assume at least 3 counters:
1666 */
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001667 x86_pmu.num_counters_fixed = max((int)edx.split.num_counters_fixed, 3);
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301668
Ingo Molnar8326f442009-06-05 20:22:46 +02001669 /*
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001670 * Install the hw-cache-events table:
Ingo Molnar8326f442009-06-05 20:22:46 +02001671 */
1672 switch (boot_cpu_data.x86_model) {
Yong Wangdc810812009-06-10 17:06:12 +08001673 case 15: /* original 65 nm celeron/pentium/core2/xeon, "Merom"/"Conroe" */
1674 case 22: /* single-core 65 nm celeron/core2solo "Merom-L"/"Conroe-L" */
1675 case 23: /* current 45 nm celeron/core2/xeon "Penryn"/"Wolfdale" */
1676 case 29: /* six-core 45 nm xeon "Dunnington" */
Ingo Molnar8326f442009-06-05 20:22:46 +02001677 memcpy(hw_cache_event_ids, core2_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02001678 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02001679
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001680 pr_cont("Core2 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02001681 break;
1682 default:
1683 case 26:
1684 memcpy(hw_cache_event_ids, nehalem_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02001685 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02001686
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001687 pr_cont("Nehalem/Corei7 events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02001688 break;
1689 case 28:
1690 memcpy(hw_cache_event_ids, atom_hw_cache_event_ids,
Thomas Gleixner820a6442009-06-08 19:10:25 +02001691 sizeof(hw_cache_event_ids));
Ingo Molnar8326f442009-06-05 20:22:46 +02001692
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001693 pr_cont("Atom events, ");
Ingo Molnar8326f442009-06-05 20:22:46 +02001694 break;
1695 }
Robert Richter72eae042009-04-29 12:47:10 +02001696 return 0;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301697}
1698
Robert Richter72eae042009-04-29 12:47:10 +02001699static int amd_pmu_init(void)
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301700{
Jaswinder Singh Rajput4d2be122009-06-11 15:28:09 +05301701 /* Performance-monitoring supported from K7 and later: */
1702 if (boot_cpu_data.x86 < 6)
1703 return -ENODEV;
1704
Robert Richter4a06bd82009-04-29 12:47:11 +02001705 x86_pmu = amd_pmu;
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02001706
Jaswinder Singh Rajputf4db43a2009-06-13 01:06:21 +05301707 /* Events are common for all AMDs */
1708 memcpy(hw_cache_event_ids, amd_hw_cache_event_ids,
1709 sizeof(hw_cache_event_ids));
Thomas Gleixnerf86748e2009-06-08 22:33:10 +02001710
Robert Richter72eae042009-04-29 12:47:10 +02001711 return 0;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301712}
1713
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301714void __init init_hw_perf_counters(void)
1715{
Robert Richter72eae042009-04-29 12:47:10 +02001716 int err;
1717
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001718 pr_info("Performance Counters: ");
1719
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301720 switch (boot_cpu_data.x86_vendor) {
1721 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001722 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301723 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301724 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001725 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301726 break;
Robert Richter41389602009-04-29 12:47:00 +02001727 default:
1728 return;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301729 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001730 if (err != 0) {
1731 pr_cont("no PMU driver, software counters only.\n");
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301732 return;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001733 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301734
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001735 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001736
Robert Richter0933e5c2009-04-29 12:47:12 +02001737 if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
Ingo Molnar241771e2008-12-03 10:39:53 +01001738 WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
Robert Richter0933e5c2009-04-29 12:47:12 +02001739 x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
Yinghai Lu4078c442009-06-29 00:41:11 -07001740 x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
Ingo Molnar241771e2008-12-03 10:39:53 +01001741 }
Robert Richter0933e5c2009-04-29 12:47:12 +02001742 perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
1743 perf_max_counters = x86_pmu.num_counters;
Ingo Molnar241771e2008-12-03 10:39:53 +01001744
Robert Richter0933e5c2009-04-29 12:47:12 +02001745 if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
Ingo Molnar703e9372008-12-17 10:51:15 +01001746 WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
Robert Richter0933e5c2009-04-29 12:47:12 +02001747 x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
Yinghai Lu4078c442009-06-29 00:41:11 -07001748 x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
Ingo Molnar703e9372008-12-17 10:51:15 +01001749 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001750
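	/*
	 * The fixed-purpose counters occupy the bit range starting at
	 * X86_PMC_IDX_FIXED in the global counter mask:
	 */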
Robert Richter0933e5c2009-04-29 12:47:12 +02001751 perf_counter_mask |=
1752 ((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;
Yong Wangc14dab52009-06-24 10:13:24 +08001753 x86_pmu.intel_ctrl = perf_counter_mask;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001754
Yong Wangc323d952009-05-29 13:28:35 +08001755 perf_counters_lapic_init();
Ingo Molnar241771e2008-12-03 10:39:53 +01001756 register_die_notifier(&perf_counter_nmi_notifier);
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001757
1758 pr_info("... version: %d\n", x86_pmu.version);
1759 pr_info("... bit width: %d\n", x86_pmu.counter_bits);
1760 pr_info("... generic counters: %d\n", x86_pmu.num_counters);
1761 pr_info("... value mask: %016Lx\n", x86_pmu.counter_mask);
1762 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
1763 pr_info("... fixed-purpose counters: %d\n", x86_pmu.num_counters_fixed);
1764 pr_info("... counter mask: %016Lx\n", perf_counter_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001765}
Ingo Molnar621a01e2008-12-11 12:46:46 +01001766
Robert Richterbb775fc2009-04-29 12:47:14 +02001767static inline void x86_pmu_read(struct perf_counter *counter)
Ingo Molnaree060942008-12-13 09:00:03 +01001768{
1769 x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
1770}
1771
Robert Richter4aeb0b42009-04-29 12:47:03 +02001772static const struct pmu pmu = {
1773 .enable = x86_pmu_enable,
1774 .disable = x86_pmu_disable,
1775 .read = x86_pmu_read,
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001776 .unthrottle = x86_pmu_unthrottle,
Ingo Molnar621a01e2008-12-11 12:46:46 +01001777};
1778
Robert Richter4aeb0b42009-04-29 12:47:03 +02001779const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001780{
1781 int err;
1782
1783 err = __hw_perf_counter_init(counter);
1784 if (err)
Peter Zijlstra9ea98e12009-03-30 19:07:09 +02001785 return ERR_PTR(err);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001786
Robert Richter4aeb0b42009-04-29 12:47:03 +02001787 return &pmu;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001788}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001789
1790/*
1791 * callchain support
1792 */
1793
1794static inline
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001795void callchain_store(struct perf_callchain_entry *entry, u64 ip)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001796{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001797 if (entry->nr < PERF_MAX_STACK_DEPTH)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001798 entry->ip[entry->nr++] = ip;
1799}
1800
1801static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
1802static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);
Frederic Weisbecker0406ca62009-07-01 21:02:09 +02001803static DEFINE_PER_CPU(int, in_nmi_frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001804
1805
1806static void
1807backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
1808{
1809 /* Ignore warnings */
1810}
1811
1812static void backtrace_warning(void *data, char *msg)
1813{
1814 /* Ignore warnings */
1815}
1816
1817static int backtrace_stack(void *data, char *name)
1818{
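	/*
	 * Remember whether the unwinder is currently walking the NMI stack;
	 * backtrace_address() below drops addresses found on it, since they
	 * belong to the NMI handler rather than the interrupted context.
	 */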
Frederic Weisbecker0406ca62009-07-01 21:02:09 +02001819 per_cpu(in_nmi_frame, smp_processor_id()) =
1820 x86_is_stack_id(NMI_STACK, name);
1821
Ingo Molnar038e8362009-06-15 09:57:59 +02001822 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001823}
1824
1825static void backtrace_address(void *data, unsigned long addr, int reliable)
1826{
1827 struct perf_callchain_entry *entry = data;
1828
Frederic Weisbecker0406ca62009-07-01 21:02:09 +02001829 if (per_cpu(in_nmi_frame, smp_processor_id()))
1830 return;
1831
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001832 if (reliable)
1833 callchain_store(entry, addr);
1834}
1835
1836static const struct stacktrace_ops backtrace_ops = {
1837 .warning = backtrace_warning,
1838 .warning_symbol = backtrace_warning_symbol,
1839 .stack = backtrace_stack,
1840 .address = backtrace_address,
1841};
1842
Ingo Molnar038e8362009-06-15 09:57:59 +02001843#include "../dumpstack.h"
1844
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001845static void
1846perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
1847{
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001848 callchain_store(entry, PERF_CONTEXT_KERNEL);
Ingo Molnar038e8362009-06-15 09:57:59 +02001849 callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001850
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001851 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001852}
1853
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001854/*
 1855 * best-effort, GUP-based copy_from_user() that assumes IRQ or NMI context
1856 */
1857static unsigned long
1858copy_from_user_nmi(void *to, const void __user *from, unsigned long n)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001859{
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001860 unsigned long offset, addr = (unsigned long)from;
1861 int type = in_nmi() ? KM_NMI : KM_IRQ0;
1862 unsigned long size, len = 0;
1863 struct page *page;
1864 void *map;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001865 int ret;
1866
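	/*
	 * A regular copy_from_user() can fault and sleep, which is not allowed
	 * in NMI/IRQ context.  Instead, pin each page with
	 * __get_user_pages_fast(), copy through a temporary atomic kmap and
	 * release the page again, one page-sized fragment at a time.
	 */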
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001867 do {
1868 ret = __get_user_pages_fast(addr, 1, 0, &page);
1869 if (!ret)
1870 break;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001871
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001872 offset = addr & (PAGE_SIZE - 1);
1873 size = min(PAGE_SIZE - offset, n - len);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001874
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001875 map = kmap_atomic(page, type);
1876 memcpy(to, map+offset, size);
1877 kunmap_atomic(map, type);
1878 put_page(page);
1879
1880 len += size;
1881 to += size;
1882 addr += size;
1883
1884 } while (len < n);
1885
1886 return len;
1887}
1888
1889static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
1890{
1891 unsigned long bytes;
1892
1893 bytes = copy_from_user_nmi(frame, fp, sizeof(*frame));
1894
1895 return bytes == sizeof(*frame);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001896}
1897
1898static void
1899perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
1900{
1901 struct stack_frame frame;
1902 const void __user *fp;
1903
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001904 if (!user_mode(regs))
1905 regs = task_pt_regs(current);
1906
Peter Zijlstra74193ef2009-06-15 13:07:24 +02001907 fp = (void __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001908
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001909 callchain_store(entry, PERF_CONTEXT_USER);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001910 callchain_store(entry, regs->ip);
1911
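	/*
	 * Walk the user stack by chasing saved frame pointers: each frame
	 * starts with a pointer to the previous frame followed by the return
	 * address.  Stop when a frame cannot be copied or when the chain
	 * stops moving up the stack.
	 */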
Peter Zijlstraf9188e02009-06-18 22:20:52 +02001912 while (entry->nr < PERF_MAX_STACK_DEPTH) {
Ingo Molnar038e8362009-06-15 09:57:59 +02001913 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001914 frame.return_address = 0;
1915
1916 if (!copy_stack_frame(fp, &frame))
1917 break;
1918
Ingo Molnar5a6cec32009-05-29 11:25:09 +02001919 if ((unsigned long)fp < regs->sp)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001920 break;
1921
1922 callchain_store(entry, frame.return_address);
Ingo Molnar038e8362009-06-15 09:57:59 +02001923 fp = frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02001924 }
1925}
1926
1927static void
1928perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
1929{
1930 int is_user;
1931
1932 if (!regs)
1933 return;
1934
1935 is_user = user_mode(regs);
1936
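	/*
	 * No callchains for the idle task, and don't walk the user stack of a
	 * task that is not currently running:
	 */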
1937 if (!current || current->pid == 0)
1938 return;
1939
1940 if (is_user && current->state != TASK_RUNNING)
1941 return;
1942
1943 if (!is_user)
1944 perf_callchain_kernel(regs, entry);
1945
1946 if (current->mm)
1947 perf_callchain_user(regs, entry);
1948}
1949
1950struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
1951{
1952 struct perf_callchain_entry *entry;
1953
1954 if (in_nmi())
1955 entry = &__get_cpu_var(nmi_entry);
1956 else
1957 entry = &__get_cpu_var(irq_entry);
1958
1959 entry->nr = 0;
1960
1961 perf_do_callchain(regs, entry);
1962
1963 return entry;
1964}