/*
 * Performance counter x86 architecture code
 *
 *  Copyright(C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright(C) 2008 Red Hat, Inc., Ingo Molnar
 *  Copyright(C) 2009 Jaswinder Singh Rajput
 *  Copyright(C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_counter.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>

static u64 perf_counter_mask __read_mostly;

struct cpu_hw_counters {
	struct perf_counter	*counters[X86_PMC_IDX_MAX];
	unsigned long		used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		active[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	unsigned long		interrupts;
	u64			throttle_ctrl;
	int			enabled;
};

/*
 * struct x86_pmu - generic x86 pmu
 */
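/*
 * The vendor-specific backends below (intel_pmu/amd_pmu) fill in these
 * callbacks and constants; the boot-time *_pmu_init() routines copy the
 * matching template into x86_pmu and the rest of this file only calls
 * through x86_pmu.
 */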
struct x86_pmu {
	const char	*name;
	int		version;
	int		(*handle_irq)(struct pt_regs *, int);
	u64		(*save_disable_all)(void);
	void		(*restore_all)(u64);
	void		(*enable)(struct hw_perf_counter *, int);
	void		(*disable)(struct hw_perf_counter *, int);
	unsigned	eventsel;
	unsigned	perfctr;
	u64		(*event_map)(int);
	u64		(*raw_event)(u64);
	int		max_events;
	int		num_counters;
	int		num_counters_fixed;
	int		counter_bits;
	u64		counter_mask;
};

static struct x86_pmu x86_pmu __read_mostly;

static DEFINE_PER_CPU(struct cpu_hw_counters, cpu_hw_counters) = {
	.enabled = 1,
};

/*
 * Intel PerfMon v3. Used on Core2 and later.
 */
static const u64 intel_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x003c,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x4f2e,
  [PERF_COUNT_CACHE_MISSES]		= 0x412e,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
  [PERF_COUNT_BUS_CYCLES]		= 0x013c,
};

static u64 intel_pmu_event_map(int event)
{
	return intel_perfmon_event_map[event];
}

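/*
 * For raw events user space supplies the event-select register value
 * directly; only the fields we want to expose are passed through.  The
 * CORE_EVNTSEL_* masks below correspond to the architectural PERFEVTSEL
 * layout: event select in bits 0-7, unit mask in bits 8-15 and the
 * counter-mask (cmask) field in bits 24-31.
 */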
static u64 intel_pmu_raw_event(u64 event)
{
#define CORE_EVNTSEL_EVENT_MASK		0x000000FFULL
#define CORE_EVNTSEL_UNIT_MASK		0x0000FF00ULL
#define CORE_EVNTSEL_COUNTER_MASK	0xFF000000ULL

#define CORE_EVNTSEL_MASK		\
	(CORE_EVNTSEL_EVENT_MASK |	\
	 CORE_EVNTSEL_UNIT_MASK  |	\
	 CORE_EVNTSEL_COUNTER_MASK)

	return event & CORE_EVNTSEL_MASK;
}

/*
 * AMD Performance Monitor K7 and later.
 */
static const u64 amd_perfmon_event_map[] =
{
  [PERF_COUNT_CPU_CYCLES]		= 0x0076,
  [PERF_COUNT_INSTRUCTIONS]		= 0x00c0,
  [PERF_COUNT_CACHE_REFERENCES]		= 0x0080,
  [PERF_COUNT_CACHE_MISSES]		= 0x0081,
  [PERF_COUNT_BRANCH_INSTRUCTIONS]	= 0x00c4,
  [PERF_COUNT_BRANCH_MISSES]		= 0x00c5,
};

static u64 amd_pmu_event_map(int event)
{
	return amd_perfmon_event_map[event];
}

static u64 amd_pmu_raw_event(u64 event)
{
#define K7_EVNTSEL_EVENT_MASK	0x7000000FFULL
#define K7_EVNTSEL_UNIT_MASK	0x00000FF00ULL
#define K7_EVNTSEL_COUNTER_MASK	0x0FF000000ULL

#define K7_EVNTSEL_MASK			\
	(K7_EVNTSEL_EVENT_MASK |	\
	 K7_EVNTSEL_UNIT_MASK  |	\
	 K7_EVNTSEL_COUNTER_MASK)

	return event & K7_EVNTSEL_MASK;
}

/*
 * Propagate counter elapsed time into the generic counter.
 * Can only be executed on the CPU where the counter is active.
 * Returns the delta events processed.
 */
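/*
 * Worked example of the 32-bit clipping done below (illustrative values):
 * with prev_raw_count = 0xffffff00 and new_raw_count = 0x00000010 the
 * counter wrapped its low 32 bits, and
 *	(u64)(u32)((s32)0x00000010 - (s32)0xffffff00) = 0x110
 * i.e. the 272 events that actually occurred, regardless of whether the
 * hardware sign-extended the raw value above bit 31.
 */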
static u64
x86_perf_counter_update(struct perf_counter *counter,
			struct hw_perf_counter *hwc, int idx)
{
	u64 prev_raw_count, new_raw_count, delta;

	/*
	 * Careful: an NMI might modify the previous counter value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic counter atomically:
	 */
again:
	prev_raw_count = atomic64_read(&hwc->prev_count);
	rdmsrl(hwc->counter_base + idx, new_raw_count);

	if (atomic64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (counter-)time and add that to the generic counter.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count, so we do that by clipping the delta to 32 bits:
	 */
	delta = (u64)(u32)((s32)new_raw_count - (s32)prev_raw_count);

	atomic64_add(delta, &counter->count);
	atomic64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

static atomic_t num_counters;
static DEFINE_MUTEX(pmc_reserve_mutex);

static bool reserve_pmc_hardware(void)
{
	int i;

	if (nmi_watchdog == NMI_LOCAL_APIC)
		disable_lapic_nmi_watchdog();

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu.perfctr + i))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu.eventsel + i))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu.eventsel + i);

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu.perfctr + i);

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu.perfctr + i);
		release_evntsel_nmi(x86_pmu.eventsel + i);
	}

	if (nmi_watchdog == NMI_LOCAL_APIC)
		enable_lapic_nmi_watchdog();
}

static void hw_perf_counter_destroy(struct perf_counter *counter)
{
	if (atomic_dec_and_mutex_lock(&num_counters, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

/*
 * Setup the hardware configuration for a given hw_event_type
 */
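/*
 * Illustrative example: a non-raw PERF_COUNT_INSTRUCTIONS counter on Intel
 * with nothing excluded ends up with
 *	hwc->config = ARCH_PERFMON_EVENTSEL_INT |
 *		      ARCH_PERFMON_EVENTSEL_USR |
 *		      ARCH_PERFMON_EVENTSEL_OS  |
 *		      intel_pmu_event_map(PERF_COUNT_INSTRUCTIONS)
 * (the last term being 0x00c0), with the enable bit still clear; it is
 * only set once the counter is scheduled onto a PMC.
 */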
static int __hw_perf_counter_init(struct perf_counter *counter)
{
	struct perf_counter_hw_event *hw_event = &counter->hw_event;
	struct hw_perf_counter *hwc = &counter->hw;
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = 0;
	if (!atomic_inc_not_zero(&num_counters)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&num_counters) == 0 && !reserve_pmc_hardware())
			err = -EBUSY;
		else
			atomic_inc(&num_counters);
		mutex_unlock(&pmc_reserve_mutex);
	}
	if (err)
		return err;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	hwc->config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to.
	 */
	if (!hw_event->exclude_user)
		hwc->config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!hw_event->exclude_kernel)
		hwc->config |= ARCH_PERFMON_EVENTSEL_OS;

	/*
	 * If privileged enough, allow NMI events:
	 */
	hwc->nmi = 0;
	if (capable(CAP_SYS_ADMIN) && hw_event->nmi)
		hwc->nmi = 1;

	hwc->irq_period = hw_event->irq_period;
	/*
	 * Intel PMCs cannot be accessed sanely above 32-bit width,
	 * so we install an artificial 1<<31 period regardless of
	 * the generic counter period:
	 */
	if (boot_cpu_data.x86_vendor == X86_VENDOR_INTEL)
		if ((s64)hwc->irq_period <= 0 || hwc->irq_period > 0x7FFFFFFF)
			hwc->irq_period = 0x7FFFFFFF;

	atomic64_set(&hwc->period_left, hwc->irq_period);

	/*
	 * Raw event type provides the config in the event structure
	 */
	if (perf_event_raw(hw_event)) {
		hwc->config |= x86_pmu.raw_event(perf_event_config(hw_event));
	} else {
		if (perf_event_id(hw_event) >= x86_pmu.max_events)
			return -EINVAL;
		/*
		 * The generic map:
		 */
		hwc->config |= x86_pmu.event_map(perf_event_id(hw_event));
	}

	counter->destroy = hw_perf_counter_destroy;

	return 0;
}

static u64 intel_pmu_save_disable_all(void)
{
	u64 ctrl;

	rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, 0);

	return ctrl;
}

static u64 amd_pmu_save_disable_all(void)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int enabled, idx;

	enabled = cpuc->enabled;
	cpuc->enabled = 0;
	/*
	 * ensure we write the disable before we start disabling the
	 * counters proper, so that amd_pmu_enable_counter() does the
	 * right thing.
	 */
	barrier();

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (!(val & ARCH_PERFMON_EVENTSEL0_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}

	return enabled;
}

u64 hw_perf_save_disable(void)
{
	if (!x86_pmu_initialized())
		return 0;
	return x86_pmu.save_disable_all();
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_save_disable);

static void intel_pmu_restore_all(u64 ctrl)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
}

static void amd_pmu_restore_all(u64 ctrl)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	int idx;

	cpuc->enabled = ctrl;
	barrier();
	if (!ctrl)
		return;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active))
			continue;
		rdmsrl(MSR_K7_EVNTSEL0 + idx, val);
		if (val & ARCH_PERFMON_EVENTSEL0_ENABLE)
			continue;
		val |= ARCH_PERFMON_EVENTSEL0_ENABLE;
		wrmsrl(MSR_K7_EVNTSEL0 + idx, val);
	}
}

void hw_perf_restore(u64 ctrl)
{
	if (!x86_pmu_initialized())
		return;
	x86_pmu.restore_all(ctrl);
}
/*
 * Exported because of ACPI idle
 */
EXPORT_SYMBOL_GPL(hw_perf_restore);

static inline u64 intel_pmu_get_status(u64 mask)
{
	u64 status;

	rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);

	return status;
}

static inline void intel_pmu_ack_status(u64 ack)
{
	wrmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, ack);
}

static inline void x86_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config | ARCH_PERFMON_EVENTSEL0_ENABLE);
}

static inline void x86_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	int err;
	err = checking_wrmsrl(hwc->config_base + idx,
			      hwc->config);
}

static inline void
intel_pmu_disable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, mask;
	int err;

	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static inline void
intel_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_disable_fixed(hwc, idx);
		return;
	}

	x86_pmu_disable_counter(hwc, idx);
}

static inline void
amd_pmu_disable_counter(struct hw_perf_counter *hwc, int idx)
{
	x86_pmu_disable_counter(hwc, idx);
}

static DEFINE_PER_CPU(u64, prev_left[X86_PMC_IDX_MAX]);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the counter disabled in hw:
 */
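/*
 * Example: with irq_period = 100000 and period_left = 100000 the counter
 * MSR is programmed to (u64)-100000 (masked to the counter width), so the
 * hardware counts up and raises the overflow PMI after exactly 100000
 * events.
 */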
static void
x86_perf_counter_set_period(struct perf_counter *counter,
			     struct hw_perf_counter *hwc, int idx)
{
	s64 left = atomic64_read(&hwc->period_left);
	s64 period = hwc->irq_period;
	int err;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		atomic64_set(&hwc->period_left, left);
	}

	if (unlikely(left <= 0)) {
		left += period;
		atomic64_set(&hwc->period_left, left);
	}

	per_cpu(prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw counter starts counting from this counter offset,
	 * mark it to be able to extract future deltas:
	 */
	atomic64_set(&hwc->prev_count, (u64)-left);

	err = checking_wrmsrl(hwc->counter_base + idx,
			     (u64)(-left) & x86_pmu.counter_mask);
}

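/*
 * MSR_ARCH_PERFMON_FIXED_CTR_CTRL packs one 4-bit control field per fixed
 * counter: bit 0 enables ring-0 (OS) counting, bit 1 enables ring-3 (user)
 * counting and bit 3 enables the PMI on overflow.  Fixed counter N thus
 * owns bits [4*N+3:4*N], which is the shift/mask arithmetic used below.
 */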
static inline void
intel_pmu_enable_fixed(struct hw_perf_counter *hwc, int __idx)
{
	int idx = __idx - X86_PMC_IDX_FIXED;
	u64 ctrl_val, bits, mask;
	int err;

	/*
	 * Enable IRQ generation (0x8),
	 * and enable ring-3 counting (0x2) and ring-0 counting (0x1)
	 * if requested:
	 */
	bits = 0x8ULL;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_USR)
		bits |= 0x2;
	if (hwc->config & ARCH_PERFMON_EVENTSEL_OS)
		bits |= 0x1;
	bits <<= (idx * 4);
	mask = 0xfULL << (idx * 4);

	rdmsrl(hwc->config_base, ctrl_val);
	ctrl_val &= ~mask;
	ctrl_val |= bits;
	err = checking_wrmsrl(hwc->config_base, ctrl_val);
}

static void intel_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	if (unlikely(hwc->config_base == MSR_ARCH_PERFMON_FIXED_CTR_CTRL)) {
		intel_pmu_enable_fixed(hwc, idx);
		return;
	}

	x86_pmu_enable_counter(hwc, idx);
}

static void amd_pmu_enable_counter(struct hw_perf_counter *hwc, int idx)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);

	if (cpuc->enabled)
		x86_pmu_enable_counter(hwc, idx);
	else
		x86_pmu_disable_counter(hwc, idx);
}

static int
fixed_mode_idx(struct perf_counter *counter, struct hw_perf_counter *hwc)
{
	unsigned int event;

	if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD)
		return -1;

	if (unlikely(hwc->nmi))
		return -1;

	event = hwc->config & ARCH_PERFMON_EVENT_MASK;

	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_INSTRUCTIONS)))
		return X86_PMC_IDX_FIXED_INSTRUCTIONS;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_CPU_CYCLES)))
		return X86_PMC_IDX_FIXED_CPU_CYCLES;
	if (unlikely(event == x86_pmu.event_map(PERF_COUNT_BUS_CYCLES)))
		return X86_PMC_IDX_FIXED_BUS_CYCLES;

	return -1;
}

/*
 * Find a PMC slot for the freshly enabled / scheduled in counter:
 */
static int x86_pmu_enable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx;

	idx = fixed_mode_idx(counter, hwc);
	if (idx >= 0) {
		/*
		 * Try to get the fixed counter, if that is already taken
		 * then try to get a generic counter:
		 */
		if (test_and_set_bit(idx, cpuc->used))
			goto try_generic;

		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		/*
		 * We set it so that counter_base + idx in wrmsr/rdmsr maps to
		 * MSR_ARCH_PERFMON_FIXED_CTR0 ... CTR2:
		 */
		hwc->counter_base =
			MSR_ARCH_PERFMON_FIXED_CTR0 - X86_PMC_IDX_FIXED;
		hwc->idx = idx;
	} else {
		idx = hwc->idx;
		/* Try to get the previous generic counter again */
		if (test_and_set_bit(idx, cpuc->used)) {
try_generic:
			idx = find_first_zero_bit(cpuc->used,
						  x86_pmu.num_counters);
			if (idx == x86_pmu.num_counters)
				return -EAGAIN;

			set_bit(idx, cpuc->used);
			hwc->idx = idx;
		}
		hwc->config_base  = x86_pmu.eventsel;
		hwc->counter_base = x86_pmu.perfctr;
	}

	perf_counters_lapic_init(hwc->nmi);

	x86_pmu.disable(hwc, idx);

	cpuc->counters[idx] = counter;
	set_bit(idx, cpuc->active);

	x86_perf_counter_set_period(counter, hwc, idx);
	x86_pmu.enable(hwc, idx);

	return 0;
}

void perf_counter_print_debug(void)
{
	u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
	struct cpu_hw_counters *cpuc;
	int cpu, idx;

	if (!x86_pmu.num_counters)
		return;

	local_irq_disable();

	cpu = smp_processor_id();
	cpuc = &per_cpu(cpu_hw_counters, cpu);

	if (x86_pmu.version >= 2) {
		rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
		rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
		rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);

		pr_info("\n");
		pr_info("CPU#%d: ctrl:       %016llx\n", cpu, ctrl);
		pr_info("CPU#%d: status:     %016llx\n", cpu, status);
		pr_info("CPU#%d: overflow:   %016llx\n", cpu, overflow);
		pr_info("CPU#%d: fixed:      %016llx\n", cpu, fixed);
	}
	pr_info("CPU#%d: used:       %016llx\n", cpu, *(u64 *)cpuc->used);

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		rdmsrl(x86_pmu.eventsel + idx, pmc_ctrl);
		rdmsrl(x86_pmu.perfctr  + idx, pmc_count);

		prev_left = per_cpu(prev_left[idx], cpu);

		pr_info("CPU#%d:   gen-PMC%d ctrl:  %016llx\n",
			cpu, idx, pmc_ctrl);
		pr_info("CPU#%d:   gen-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
		pr_info("CPU#%d:   gen-PMC%d left:  %016llx\n",
			cpu, idx, prev_left);
	}
	for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
		rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);

		pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
			cpu, idx, pmc_count);
	}
	local_irq_enable();
}

static void x86_pmu_disable(struct perf_counter *counter)
{
	struct cpu_hw_counters *cpuc = &__get_cpu_var(cpu_hw_counters);
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	/*
	 * Must be done before we disable, otherwise the nmi handler
	 * could reenable again:
	 */
	clear_bit(idx, cpuc->active);
	x86_pmu.disable(hwc, idx);

	/*
	 * Make sure the cleared pointer becomes visible before we
	 * (potentially) free the counter:
	 */
	barrier();

	/*
	 * Drain the remaining delta count out of a counter
	 * that we are disabling:
	 */
	x86_perf_counter_update(counter, hwc, idx);
	cpuc->counters[idx] = NULL;
	clear_bit(idx, cpuc->used);
}

/*
 * Save and restart an expired counter. Called by NMI contexts,
 * so it has to be careful about preempting normal counter ops:
 */
static void intel_pmu_save_and_restart(struct perf_counter *counter)
{
	struct hw_perf_counter *hwc = &counter->hw;
	int idx = hwc->idx;

	x86_perf_counter_update(counter, hwc, idx);
	x86_perf_counter_set_period(counter, hwc, idx);

	if (counter->state == PERF_COUNTER_STATE_ACTIVE)
		intel_pmu_enable_counter(hwc, idx);
}

/*
 * Maximum interrupt frequency of 100KHz per CPU
 */
#define PERFMON_MAX_INTERRUPTS (100000/HZ)
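/*
 * E.g. with HZ=1000 this works out to 100 counter interrupts per tick per
 * CPU, i.e. the 100KHz ceiling mentioned above; once a CPU exceeds it the
 * handlers stop re-enabling counters until perf_counter_unthrottle()
 * resets cpuc->interrupts.
 */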

/*
 * This handler is triggered by the local APIC, so the APIC IRQ handling
 * rules apply:
 */
static int intel_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	int bit, cpu = smp_processor_id();
	u64 ack, status;
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
	int ret = 0;

	cpuc->throttle_ctrl = intel_pmu_save_disable_all();

	status = intel_pmu_get_status(cpuc->throttle_ctrl);
	if (!status)
		goto out;

	ret = 1;
again:
	inc_irq_stat(apic_perf_irqs);
	ack = status;
	for_each_bit(bit, (unsigned long *)&status, X86_PMC_IDX_MAX) {
		struct perf_counter *counter = cpuc->counters[bit];

		clear_bit(bit, (unsigned long *) &status);
		if (!test_bit(bit, cpuc->active))
			continue;

		intel_pmu_save_and_restart(counter);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			intel_pmu_disable_counter(&counter->hw, bit);
	}

	intel_pmu_ack_status(ack);

	/*
	 * Repeat if there is more work to be done:
	 */
	status = intel_pmu_get_status(cpuc->throttle_ctrl);
	if (status)
		goto again;
out:
	/*
	 * Restore - do not reenable when global enable is off or throttled:
	 */
	if (++cpuc->interrupts < PERFMON_MAX_INTERRUPTS)
		intel_pmu_restore_all(cpuc->throttle_ctrl);

	return ret;
}

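/*
 * AMD has no equivalent of the Intel global status MSR, so overflow is
 * detected by re-reading every active counter: counters are programmed
 * with a negative value (-left), so as long as the top bit (bit 47 here)
 * is still set no overflow has occurred; a cleared top bit means the
 * counter wrapped past zero and must be reprogrammed.
 */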
static int amd_pmu_handle_irq(struct pt_regs *regs, int nmi)
{
	int cpu = smp_processor_id();
	struct cpu_hw_counters *cpuc = &per_cpu(cpu_hw_counters, cpu);
	u64 val;
	int handled = 0;
	struct perf_counter *counter;
	struct hw_perf_counter *hwc;
	int idx;

	++cpuc->interrupts;
	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		if (!test_bit(idx, cpuc->active))
			continue;
		counter = cpuc->counters[idx];
		hwc = &counter->hw;
		val = x86_perf_counter_update(counter, hwc, idx);
		if (val & (1ULL << (x86_pmu.counter_bits - 1)))
			continue;
		/* counter overflow */
		x86_perf_counter_set_period(counter, hwc, idx);
		handled = 1;
		inc_irq_stat(apic_perf_irqs);
		if (perf_counter_overflow(counter, nmi, regs, 0))
			amd_pmu_disable_counter(hwc, idx);
		else if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS)
			/*
			 * do not reenable when throttled, but reload
			 * the register
			 */
			amd_pmu_disable_counter(hwc, idx);
		else if (counter->state == PERF_COUNTER_STATE_ACTIVE)
			amd_pmu_enable_counter(hwc, idx);
	}
	return handled;
}

void perf_counter_unthrottle(void)
{
	struct cpu_hw_counters *cpuc;

	if (!x86_pmu_initialized())
		return;

	cpuc = &__get_cpu_var(cpu_hw_counters);
	if (cpuc->interrupts >= PERFMON_MAX_INTERRUPTS) {
		if (printk_ratelimit())
			printk(KERN_WARNING "PERFMON: max interrupts exceeded!\n");
		hw_perf_restore(cpuc->throttle_ctrl);
	}
	cpuc->interrupts = 0;
}

void smp_perf_counter_interrupt(struct pt_regs *regs)
{
	irq_enter();
	apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	ack_APIC_irq();
	x86_pmu.handle_irq(regs, 0);
	irq_exit();
}

void smp_perf_pending_interrupt(struct pt_regs *regs)
{
	irq_enter();
	ack_APIC_irq();
	inc_irq_stat(apic_pending_irqs);
	perf_counter_do_pending();
	irq_exit();
}

void set_perf_counter_pending(void)
{
	apic->send_IPI_self(LOCAL_PENDING_VECTOR);
}

void perf_counters_lapic_init(int nmi)
{
	u32 apic_val;

	if (!x86_pmu_initialized())
		return;

	/*
	 * Enable the performance counter vector in the APIC LVT:
	 */
	apic_val = apic_read(APIC_LVTERR);

	apic_write(APIC_LVTERR, apic_val | APIC_LVT_MASKED);
	if (nmi)
		apic_write(APIC_LVTPC, APIC_DM_NMI);
	else
		apic_write(APIC_LVTPC, LOCAL_PERF_VECTOR);
	apic_write(APIC_LVTERR, apic_val);
}

static int __kprobes
perf_counter_nmi_handler(struct notifier_block *self,
			 unsigned long cmd, void *__args)
{
	struct die_args *args = __args;
	struct pt_regs *regs;
	int ret;

	switch (cmd) {
	case DIE_NMI:
	case DIE_NMI_IPI:
		break;

	default:
		return NOTIFY_DONE;
	}

	regs = args->regs;

	apic_write(APIC_LVTPC, APIC_DM_NMI);
	ret = x86_pmu.handle_irq(regs, 1);

	return ret ? NOTIFY_STOP : NOTIFY_OK;
}

static __read_mostly struct notifier_block perf_counter_nmi_notifier = {
	.notifier_call		= perf_counter_nmi_handler,
	.next			= NULL,
	.priority		= 1
};

static struct x86_pmu intel_pmu = {
	.name			= "Intel",
	.handle_irq		= intel_pmu_handle_irq,
	.save_disable_all	= intel_pmu_save_disable_all,
	.restore_all		= intel_pmu_restore_all,
	.enable			= intel_pmu_enable_counter,
	.disable		= intel_pmu_disable_counter,
	.eventsel		= MSR_ARCH_PERFMON_EVENTSEL0,
	.perfctr		= MSR_ARCH_PERFMON_PERFCTR0,
	.event_map		= intel_pmu_event_map,
	.raw_event		= intel_pmu_raw_event,
	.max_events		= ARRAY_SIZE(intel_perfmon_event_map),
};

static struct x86_pmu amd_pmu = {
	.name			= "AMD",
	.handle_irq		= amd_pmu_handle_irq,
	.save_disable_all	= amd_pmu_save_disable_all,
	.restore_all		= amd_pmu_restore_all,
	.enable			= amd_pmu_enable_counter,
	.disable		= amd_pmu_disable_counter,
	.eventsel		= MSR_K7_EVNTSEL0,
	.perfctr		= MSR_K7_PERFCTR0,
	.event_map		= amd_pmu_event_map,
	.raw_event		= amd_pmu_raw_event,
	.max_events		= ARRAY_SIZE(amd_perfmon_event_map),
	.num_counters		= 4,
	.counter_bits		= 48,
	.counter_mask		= (1ULL << 48) - 1,
};

static int intel_pmu_init(void)
{
	union cpuid10_edx edx;
	union cpuid10_eax eax;
	unsigned int unused;
	unsigned int ebx;
	int version;

	if (!cpu_has(&boot_cpu_data, X86_FEATURE_ARCH_PERFMON))
		return -ENODEV;

	/*
	 * Check whether the Architectural PerfMon supports
	 * Branch Misses Retired Event or not.
	 */
	cpuid(10, &eax.full, &ebx, &unused, &edx.full);
	if (eax.split.mask_length <= ARCH_PERFMON_BRANCH_MISSES_RETIRED)
		return -ENODEV;

	version = eax.split.version_id;
	if (version < 2)
		return -ENODEV;

	x86_pmu = intel_pmu;
	x86_pmu.version = version;
	x86_pmu.num_counters = eax.split.num_counters;
	x86_pmu.num_counters_fixed = edx.split.num_counters_fixed;
	x86_pmu.counter_bits = eax.split.bit_width;
	x86_pmu.counter_mask = (1ULL << eax.split.bit_width) - 1;

	return 0;
}

static int amd_pmu_init(void)
{
	x86_pmu = amd_pmu;
	return 0;
}

void __init init_hw_perf_counters(void)
{
	int err;

	switch (boot_cpu_data.x86_vendor) {
	case X86_VENDOR_INTEL:
		err = intel_pmu_init();
		break;
	case X86_VENDOR_AMD:
		err = amd_pmu_init();
		break;
	default:
		return;
	}
	if (err != 0)
		return;

	pr_info("%s Performance Monitoring support detected.\n", x86_pmu.name);
	pr_info("... version:         %d\n", x86_pmu.version);
	pr_info("... bit width:       %d\n", x86_pmu.counter_bits);

	pr_info("... num counters:    %d\n", x86_pmu.num_counters);
	if (x86_pmu.num_counters > X86_PMC_MAX_GENERIC) {
		x86_pmu.num_counters = X86_PMC_MAX_GENERIC;
		WARN(1, KERN_ERR "hw perf counters %d > max(%d), clipping!",
		     x86_pmu.num_counters, X86_PMC_MAX_GENERIC);
	}
	perf_counter_mask = (1 << x86_pmu.num_counters) - 1;
	perf_max_counters = x86_pmu.num_counters;

	pr_info("... value mask:      %016Lx\n", x86_pmu.counter_mask);

	if (x86_pmu.num_counters_fixed > X86_PMC_MAX_FIXED) {
		x86_pmu.num_counters_fixed = X86_PMC_MAX_FIXED;
		WARN(1, KERN_ERR "hw perf counters fixed %d > max(%d), clipping!",
		     x86_pmu.num_counters_fixed, X86_PMC_MAX_FIXED);
	}
	pr_info("... fixed counters:  %d\n", x86_pmu.num_counters_fixed);

	perf_counter_mask |=
		((1LL << x86_pmu.num_counters_fixed)-1) << X86_PMC_IDX_FIXED;

	pr_info("... counter mask:    %016Lx\n", perf_counter_mask);

	perf_counters_lapic_init(0);
	register_die_notifier(&perf_counter_nmi_notifier);
}

static inline void x86_pmu_read(struct perf_counter *counter)
{
	x86_perf_counter_update(counter, &counter->hw, counter->hw.idx);
}

static const struct pmu pmu = {
	.enable		= x86_pmu_enable,
	.disable	= x86_pmu_disable,
	.read		= x86_pmu_read,
};

const struct pmu *hw_perf_counter_init(struct perf_counter *counter)
{
	int err;

	err = __hw_perf_counter_init(counter);
	if (err)
		return ERR_PTR(err);

	return &pmu;
}

/*
 * callchain support
 */

static inline
void callchain_store(struct perf_callchain_entry *entry, unsigned long ip)
{
	if (entry->nr < MAX_STACK_DEPTH)
		entry->ip[entry->nr++] = ip;
}

static DEFINE_PER_CPU(struct perf_callchain_entry, irq_entry);
static DEFINE_PER_CPU(struct perf_callchain_entry, nmi_entry);


static void
backtrace_warning_symbol(void *data, char *msg, unsigned long symbol)
{
	/* Ignore warnings */
}

static void backtrace_warning(void *data, char *msg)
{
	/* Ignore warnings */
}

static int backtrace_stack(void *data, char *name)
{
	/* Don't bother with IRQ stacks for now */
	return -1;
}

static void backtrace_address(void *data, unsigned long addr, int reliable)
{
	struct perf_callchain_entry *entry = data;

	if (reliable)
		callchain_store(entry, addr);
}

static const struct stacktrace_ops backtrace_ops = {
	.warning		= backtrace_warning,
	.warning_symbol		= backtrace_warning_symbol,
	.stack			= backtrace_stack,
	.address		= backtrace_address,
};

static void
perf_callchain_kernel(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	unsigned long bp;
	char *stack;
	int nr = entry->nr;

	callchain_store(entry, instruction_pointer(regs));

	stack = ((char *)regs + sizeof(struct pt_regs));
#ifdef CONFIG_FRAME_POINTER
	bp = frame_pointer(regs);
#else
	bp = 0;
#endif

	dump_trace(NULL, regs, (void *)stack, bp, &backtrace_ops, entry);

	entry->kernel = entry->nr - nr;
}

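/*
 * User-space callchains are gathered by walking the classic frame-pointer
 * chain: each frame is assumed to begin with the saved frame pointer
 * followed by the return address, the layout mirrored by struct
 * stack_frame below.  This only works for code compiled with frame
 * pointers (i.e. without -fomit-frame-pointer).
 */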
struct stack_frame {
	const void __user	*next_fp;
	unsigned long		return_address;
};

static int copy_stack_frame(const void __user *fp, struct stack_frame *frame)
{
	int ret;

	if (!access_ok(VERIFY_READ, fp, sizeof(*frame)))
		return 0;

	ret = 1;
	pagefault_disable();
	if (__copy_from_user_inatomic(frame, fp, sizeof(*frame)))
		ret = 0;
	pagefault_enable();

	return ret;
}

static void
perf_callchain_user(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	struct stack_frame frame;
	const void __user *fp;
	int nr = entry->nr;

	regs = (struct pt_regs *)current->thread.sp0 - 1;
	fp   = (void __user *)regs->bp;

	callchain_store(entry, regs->ip);

	while (entry->nr < MAX_STACK_DEPTH) {
		frame.next_fp		= NULL;
		frame.return_address	= 0;

		if (!copy_stack_frame(fp, &frame))
			break;

		if ((unsigned long)fp < user_stack_pointer(regs))
			break;

		callchain_store(entry, frame.return_address);
		fp = frame.next_fp;
	}

	entry->user = entry->nr - nr;
}

static void
perf_do_callchain(struct pt_regs *regs, struct perf_callchain_entry *entry)
{
	int is_user;

	if (!regs)
		return;

	is_user = user_mode(regs);

	if (!current || current->pid == 0)
		return;

	if (is_user && current->state != TASK_RUNNING)
		return;

	if (!is_user)
		perf_callchain_kernel(regs, entry);

	if (current->mm)
		perf_callchain_user(regs, entry);
}

struct perf_callchain_entry *perf_callchain(struct pt_regs *regs)
{
	struct perf_callchain_entry *entry;

	if (in_nmi())
		entry = &__get_cpu_var(nmi_entry);
	else
		entry = &__get_cpu_var(irq_entry);

	entry->nr = 0;
	entry->hv = 0;
	entry->kernel = 0;
	entry->user = 0;

	perf_do_callchain(regs, entry);

	return entry;
}