/*
 * Performance events x86 architecture code
 *
 * Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 * Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 * Copyright (C) 2009 Jaswinder Singh Rajput
 * Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 * Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 * Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 * Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 * For licensing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;

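/*
 * Generic <-> hardware cache event translation tables, indexed as
 * [cache type][cache op][cache result]. They are filled in by the
 * CPU-specific PMU setup code and consumed by set_ext_hw_attr() below;
 * an entry of 0 means "not supported", -1 means "invalid combination".
 */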
u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	s64 delta;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
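	/*
	 * Illustrative example (assuming 48-bit counters, i.e. shift == 16):
	 * if the counter wrapped from 0xfffffffffffe to 0x000000000002, then
	 * (new << 16) - (prev << 16) followed by the arithmetic right shift
	 * below yields a delta of 4, as expected.
	 */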
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!x86_pmu.extra_regs)
		return 0;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;
		/* Check if the extra msrs can be safely accessed */
		if (!er->extra_msr_access)
			return -ENXIO;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu_config_addr(i));

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

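/*
 * Sanity-check that the performance counters are actually usable: complain
 * if the BIOS has already claimed any of them, and do a write/read-back test
 * on a free counter to catch emulators/hypervisors that silently ignore the
 * counter MSRs.
 */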
static bool check_hw_exists(void)
{
	u64 val, val_fail, val_new = ~0;
	int i, reg, reg_fail, ret = 0;
	int bios_fail = 0;
	int reg_safe = -1;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
			bios_fail = 1;
			val_fail = val;
			reg_fail = reg;
		} else {
			reg_safe = i;
		}
	}

	if (x86_pmu.num_counters_fixed) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
			if (val & (0x03 << i*4)) {
				bios_fail = 1;
				val_fail = val;
				reg_fail = reg;
			}
		}
	}

	/*
	 * If all the counters are enabled, the below test will always
	 * fail.  The tools will also become useless in this scenario.
	 * Just fail and disable the hardware counters.
	 */

	if (reg_safe == -1) {
		reg = reg_safe;
		goto msr_fail;
	}

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	reg = x86_pmu_event_addr(reg_safe);
	if (rdmsrl_safe(reg, &val))
		goto msr_fail;
	val ^= 0xffffUL;
	ret = wrmsrl_safe(reg, val);
	ret |= rdmsrl_safe(reg, &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	/*
	 * We still allow the PMU driver to operate:
	 */
	if (bios_fail) {
		pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
		pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
			      reg_fail, val_fail);
	}

	return true;

msr_fail:
	pr_cont("Broken PMU hardware detected, using software events only.\n");
	pr_info("%sFailed to access perfctr msr (MSR %x is %Lx)\n",
		boot_cpu_has(X86_FEATURE_HYPERVISOR) ? KERN_INFO : KERN_ERR,
		reg, val_new);

	return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	atomic_dec(&active_events);
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
	hw_perf_event_destroy(event);

	/* undo the lbr/bts event accounting */
	x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >>  8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}

int x86_reserve_hardware(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&pmc_refcount)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&pmc_refcount) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&pmc_refcount);
		mutex_unlock(&pmc_reserve_mutex);
	}

	return err;
}

void x86_release_hardware(void)
{
	if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Check if we can create an event of a certain type (that no conflicting events
 * are present).
 */
int x86_add_exclusive(unsigned int what)
{
	int i;

	if (x86_pmu.lbr_pt_coexist)
		return 0;

	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
		mutex_lock(&pmc_reserve_mutex);
		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
				goto fail_unlock;
		}
		atomic_inc(&x86_pmu.lbr_exclusive[what]);
		mutex_unlock(&pmc_reserve_mutex);
	}

	atomic_inc(&active_events);
	return 0;

fail_unlock:
	mutex_unlock(&pmc_reserve_mutex);
	return -EBUSY;
}

void x86_del_exclusive(unsigned int what)
{
	if (x86_pmu.lbr_pt_coexist)
		return;

	atomic_dec(&x86_pmu.lbr_exclusive[what]);
	atomic_dec(&active_events);
}

int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (attr->type == PERF_TYPE_RAW)
		return x86_pmu_extra_regs(event->attr.config, event);

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !attr->freq && hwc->sample_period == 1) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;

		/* disallow bts if conflicting events are present */
		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
			return -EBUSY;

		event->destroy = hw_perf_lbr_event_destroy;
	}

	hwc->config |= config;

	return 0;
}

/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
static inline int precise_br_compat(struct perf_event *event)
{
	u64 m = event->attr.branch_sample_type;
	u64 b = 0;

	/* must capture all branches */
	if (!(m & PERF_SAMPLE_BRANCH_ANY))
		return 0;

	m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_user)
		b |= PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_kernel)
		b |= PERF_SAMPLE_BRANCH_KERNEL;

	/*
	 * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
	 */

	return m == b;
}

int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

		/* Support for constant skid */
		if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
			precise++;

			/* Support for IP fixup */
			if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
				precise++;

			if (x86_pmu.pebs_prec_dist)
				precise++;
		}

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
	}
	/*
	 * check that PEBS LBR correction does not conflict with
	 * whatever the user is asking with attr->branch_sample_type
	 */
	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
		u64 *br_type = &event->attr.branch_sample_type;

		if (has_branch_stack(event)) {
			if (!precise_br_compat(event))
				return -EOPNOTSUPP;

			/* branch_sample_type is compatible */

		} else {
			/*
			 * user did not specify branch_sample_type
			 *
			 * For PEBS fixups, we capture all
			 * the branches at the priv level of the
			 * event.
			 */
			*br_type = PERF_SAMPLE_BRANCH_ANY;

			if (!event->attr.exclude_user)
				*br_type |= PERF_SAMPLE_BRANCH_USER;

			if (!event->attr.exclude_kernel)
				*br_type |= PERF_SAMPLE_BRANCH_KERNEL;
		}
	}

	if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
		event->attach_state |= PERF_ATTACH_TASK_DATA;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	if (event->attr.sample_period && x86_pmu.limit_period) {
		if (x86_pmu.limit_period(event, event->attr.sample_period) >
				event->attr.sample_period)
			return -EINVAL;
	}

	return x86_setup_perfctr(event);
}

/*
 * Set up the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = x86_reserve_hardware();
	if (err)
		return err;

	atomic_inc(&active_events);
	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu_config_addr(idx), val);
	}
}

/*
 * There may be PMI landing after enabled=0. The PMI hitting could be before or
 * after disable_all.
 *
 * If PMI hits before disable_all, the PMU will be disabled in the NMI handler.
 * It will not be re-enabled in the NMI handler again, because enabled=0. After
 * handling the NMI, disable_all will be called, which will not change the
 * state either. If PMI hits after disable_all, the PMU is already disabled
 * before entering NMI handler. The NMI handler will not change the state
 * either.
 *
 * So either situation is harmless.
 */
static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
	int	weight;
	int	event;		/* event index */
	int	counter;	/* counter index */
	int	unassigned;	/* number of events to be assigned left */
	int	nr_gp;		/* number of GP counters used */
	unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define	SCHED_STATES_MAX	2

struct perf_sched {
	int			max_weight;
	int			max_events;
	int			max_gp;
	int			saved_states;
	struct event_constraint	**constraints;
	struct sched_state	state;
	struct sched_state	saved[SCHED_STATES_MAX];
};
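/*
 * Note: a scheduling state is only saved when an event with an "overlap"
 * constraint grabs a counter (see __perf_sched_find_counter()). The saved
 * states let perf_sched_find_counter() backtrack and retry with the next
 * counter, and SCHED_STATES_MAX bounds the depth of that backtracking.
 */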

/*
 * Initialize iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{
	int idx;

	memset(sched, 0, sizeof(*sched));
	sched->max_events	= num;
	sched->max_weight	= wmax;
	sched->max_gp		= gpmax;
	sched->constraints	= constraints;

	for (idx = 0; idx < num; idx++) {
		if (constraints[idx]->weight == wmin)
			break;
	}

	sched->state.event	= idx;		/* start with min weight */
	sched->state.weight	= wmin;
	sched->state.unassigned	= num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
		return;

	sched->saved[sched->saved_states] = sched->state;
	sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
	if (!sched->saved_states)
		return false;

	sched->saved_states--;
	sched->state = sched->saved[sched->saved_states];

	/* continue with next counter: */
	clear_bit(sched->state.counter++, sched->state.used);

	return true;
}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
	struct event_constraint *c;
	int idx;

	if (!sched->state.unassigned)
		return false;

	if (sched->state.event >= sched->max_events)
		return false;

	c = sched->constraints[sched->state.event];
	/* Prefer fixed purpose counters */
	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
		idx = INTEL_PMC_IDX_FIXED;
		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
			if (!__test_and_set_bit(idx, sched->state.used))
				goto done;
		}
	}

	/* Grab the first unused counter starting with idx */
	idx = sched->state.counter;
	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
		if (!__test_and_set_bit(idx, sched->state.used)) {
			if (sched->state.nr_gp++ >= sched->max_gp)
				return false;

			goto done;
		}
	}

	return false;

done:
	sched->state.counter = idx;

	if (c->overlap)
		perf_sched_save_state(sched);

	return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
	while (!__perf_sched_find_counter(sched)) {
		if (!perf_sched_restore_state(sched))
			return false;
	}

	return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
	struct event_constraint *c;

	if (!sched->state.unassigned || !--sched->state.unassigned)
		return false;

	do {
		/* next event */
		sched->state.event++;
		if (sched->state.event >= sched->max_events) {
			/* next weight */
			sched->state.event = 0;
			sched->state.weight++;
			if (sched->state.weight > sched->max_weight)
				return false;
		}
		c = sched->constraints[sched->state.event];
	} while (c->weight != sched->state.weight);

	sched->state.counter = 0;	/* start with first counter */

	return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign)
{
	struct perf_sched sched;

	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

	do {
		if (!perf_sched_find_counter(&sched))
			break;	/* failed */
		if (assign)
			assign[sched.state.event] = sched.state.counter;
	} while (perf_sched_next_event(&sched));

	return sched.state.unassigned;
}
EXPORT_SYMBOL_GPL(perf_assign_events);
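/*
 * Note: perf_assign_events() returns the number of events left unassigned,
 * so 0 means a complete assignment was found; x86_schedule_events() below
 * turns a non-zero result into -EINVAL.
 */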

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c;
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	struct perf_event *e;
	int i, wmin, wmax, unsched = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	if (x86_pmu.start_scheduling)
		x86_pmu.start_scheduling(cpuc);

	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		cpuc->event_constraint[i] = NULL;
		c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
		cpuc->event_constraint[i] = c;

		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = cpuc->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n) {
		int gpmax = x86_pmu.num_counters;

		/*
		 * Do not allow scheduling of more than half the available
		 * generic counters.
		 *
		 * This helps avoid counter starvation of the sibling thread by
		 * ensuring at most half the counters cannot be in exclusive
		 * mode. There are no designated counters for the limits. Any
		 * N/2 counters can be used. This helps with events with
		 * specific counter constraints.
		 */
		if (is_ht_workaround_enabled() && !cpuc->is_fake &&
		    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
			gpmax /= 2;

		unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
					     wmax, gpmax, assign);
	}

	/*
	 * In case of success (unsched = 0), mark events as committed,
	 * so we do not put_constraint() in case new events are added
	 * and fail to be scheduled
	 *
	 * We invoke the lower level commit callback to lock the resource
	 *
	 * We do not need to do all of this in case we are called to
	 * validate an event group (assign == NULL)
	 */
	if (!unsched && assign) {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			e->hw.flags |= PERF_X86_EVENT_COMMITTED;
			if (x86_pmu.commit_scheduling)
				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
		}
	} else {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			/*
			 * do not put_constraint() on committed events,
			 * because they are good to go
			 */
			if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
				continue;

			/*
			 * release events that failed scheduling
			 */
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, e);
		}
	}

	if (x86_pmu.stop_scheduling)
		x86_pmu.stop_scheduling(cpuc);

	return unsched ? -EINVAL : 0;
}

/*
 * dogrp: true if must collect siblings events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -EINVAL;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
	} else {
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
		hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		/*
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
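/*
 * The counter is programmed with -left (masked to the counter width), so it
 * overflows and raises a PMI after "left" further increments. Illustrative
 * example: with a 48-bit counter and left == 100000, the value written is
 * 2^48 - 100000.
 */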
int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	if (x86_pmu.limit_period)
		left = x86_pmu.limit_period(event, left);

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
	    local64_read(&hwc->prev_count) != (u64)-left) {
		/*
		 * The hw event starts counting from this event offset,
		 * mark it to be able to extract future deltas:
		 */
		local64_set(&hwc->prev_count, (u64)-left);

		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
	}

	/*
	 * Due to an erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001178 */
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001179static int x86_pmu_add(struct perf_event *event, int flags)
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001180{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001181 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001182 struct hw_perf_event *hwc;
1183 int assign[X86_PMC_IDX_MAX];
1184 int n, n0, ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001185
Stephane Eranian1da53e02010-01-18 10:58:01 +02001186 hwc = &event->hw;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001187
Stephane Eranian1da53e02010-01-18 10:58:01 +02001188 n0 = cpuc->n_events;
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001189 ret = n = collect_events(cpuc, event, false);
1190 if (ret < 0)
1191 goto out;
Ingo Molnar53b441a2009-05-25 21:41:28 +02001192
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001193 hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
1194 if (!(flags & PERF_EF_START))
1195 hwc->state |= PERF_HES_ARCH;
1196
Lin Ming4d1c52b2010-04-23 13:56:12 +08001197 /*
1198 * If group events scheduling transaction was started,
Lucas De Marchi0d2eb442011-03-17 16:24:16 -03001199 * skip the schedulability test here, it will be performed
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001200 * at commit time (->commit_txn) as a whole.
Lin Ming4d1c52b2010-04-23 13:56:12 +08001201 */
Sukadev Bhattiprolu8f3e5682015-09-03 20:07:53 -07001202 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001203 goto done_collect;
Lin Ming4d1c52b2010-04-23 13:56:12 +08001204
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001205 ret = x86_pmu.schedule_events(cpuc, n, assign);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001206 if (ret)
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001207 goto out;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001208 /*
1209 * copy new assignment, now we know it is possible
1210 * will be used by hw_perf_enable()
1211 */
1212 memcpy(cpuc->assign, assign, n*sizeof(int));
Ingo Molnar241771e2008-12-03 10:39:53 +01001213
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001214done_collect:
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001215 /*
1216 * Commit the collect_events() state. See x86_pmu_del() and
1217 * x86_pmu_*_txn().
1218 */
Stephane Eranian1da53e02010-01-18 10:58:01 +02001219 cpuc->n_events = n;
Peter Zijlstra356e1f22010-03-06 13:49:56 +01001220 cpuc->n_added += n - n0;
Stephane Eranian90151c352010-05-25 16:23:10 +02001221 cpuc->n_txn += n - n0;
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001222
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001223 ret = 0;
1224out:
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001225 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001226}
1227
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001228static void x86_pmu_start(struct perf_event *event, int flags)
Stephane Eraniand76a0812010-02-08 17:06:01 +02001229{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001230 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001231 int idx = event->hw.idx;
1232
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001233 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1234 return;
Stephane Eraniand76a0812010-02-08 17:06:01 +02001235
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001236 if (WARN_ON_ONCE(idx == -1))
1237 return;
1238
1239 if (flags & PERF_EF_RELOAD) {
1240 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1241 x86_perf_event_set_period(event);
1242 }
1243
1244 event->hw.state = 0;
1245
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001246 cpuc->events[idx] = event;
1247 __set_bit(idx, cpuc->active_mask);
Robert Richter63e6be62010-09-15 18:20:34 +02001248 __set_bit(idx, cpuc->running);
Peter Zijlstraaff3d912010-03-02 20:32:08 +01001249 x86_pmu.enable(event);
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001250 perf_event_update_userpage(event);
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001251}
1252
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001253void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001254{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001255 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Andi Kleenda3e6062015-02-27 09:48:31 -08001256 u64 pebs, debugctl;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001257 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001258 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001259 int cpu, idx;
1260
Robert Richter948b1bb2010-03-29 18:36:50 +02001261 if (!x86_pmu.num_counters)
Ingo Molnar1e125672008-12-09 12:18:18 +01001262 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001263
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001264 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001265
1266 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001267 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001268
Robert Richterfaa28ae2009-04-29 12:47:13 +02001269 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301270 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1271 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1272 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1273 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001274
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301275 pr_info("\n");
1276 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1277 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1278 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1279 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Andi Kleen15fde112015-02-27 09:48:32 -08001280 if (x86_pmu.pebs_constraints) {
1281 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1282 pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
1283 }
Andi Kleenda3e6062015-02-27 09:48:31 -08001284 if (x86_pmu.lbr_nr) {
1285 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1286 pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl);
1287 }
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301288 }
Peter Zijlstra7645a242010-03-08 13:51:31 +01001289 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001290
Robert Richter948b1bb2010-03-29 18:36:50 +02001291 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Robert Richter41bf4982011-02-02 17:40:57 +01001292 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1293 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001294
Tejun Heo245b2e72009-06-24 15:13:48 +09001295 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001296
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301297 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001298 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301299 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001300 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301301 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001302 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001303 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001304 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001305 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1306
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301307 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001308 cpu, idx, pmc_count);
1309 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001310 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001311}
1312
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001313void x86_pmu_stop(struct perf_event *event, int flags)
Ingo Molnar241771e2008-12-03 10:39:53 +01001314{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001315 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001316 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001317
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001318 if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1319 x86_pmu.disable(event);
1320 cpuc->events[hwc->idx] = NULL;
1321 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1322 hwc->state |= PERF_HES_STOPPED;
1323 }
Peter Zijlstra71e2d282010-03-08 17:51:33 +01001324
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001325 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1326 /*
1327 * Drain the remaining delta count out of an event
1328 * that we are disabling:
1329 */
1330 x86_perf_event_update(event);
1331 hwc->state |= PERF_HES_UPTODATE;
1332 }
Peter Zijlstra2e841872010-01-25 15:58:43 +01001333}
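/*
 * Editorial note on the hw.state bits used above: PERF_HES_STOPPED means the
 * hardware counter is no longer programmed/counting, PERF_HES_UPTODATE means
 * event->count already reflects the final hardware value.  x86_pmu_start()
 * relies on this; e.g. PERF_EF_RELOAD is only legal once the count is up to
 * date (sketch):
 *
 *	x86_pmu_stop(event, PERF_EF_UPDATE);	// STOPPED + UPTODATE
 *	...
 *	x86_pmu_start(event, PERF_EF_RELOAD);	// reprogram period, clear state
 */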
1334
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001335static void x86_pmu_del(struct perf_event *event, int flags)
Peter Zijlstra2e841872010-01-25 15:58:43 +01001336{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001337 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstra2e841872010-01-25 15:58:43 +01001338 int i;
1339
Stephane Eranian90151c352010-05-25 16:23:10 +02001340 /*
Stephane Eranian2f7f73a2013-06-20 18:42:54 +02001341 * event is descheduled
1342 */
1343 event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
1344
1345 /*
Stephane Eranian90151c352010-05-25 16:23:10 +02001346 * If we're called during a txn, we don't need to do anything.
1347 * The events never got scheduled and ->cancel_txn will truncate
1348 * the event_list.
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001349 *
1350 * XXX assumes any ->del() called during a TXN will only be on
1351 * an event added during that same TXN.
Stephane Eranian90151c352010-05-25 16:23:10 +02001352 */
Sukadev Bhattiprolu8f3e5682015-09-03 20:07:53 -07001353 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
Stephane Eranian90151c352010-05-25 16:23:10 +02001354 return;
1355
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001356 /*
1357 * Not a TXN, therefore cleanup properly.
1358 */
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001359 x86_pmu_stop(event, PERF_EF_UPDATE);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001360
Stephane Eranian1da53e02010-01-18 10:58:01 +02001361 for (i = 0; i < cpuc->n_events; i++) {
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001362 if (event == cpuc->event_list[i])
Peter Zijlstra6c9687a2010-01-25 11:57:25 +01001363 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001364 }
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001365
1366 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
1367 return;
1368
1369 /* If we have a newly added event, make sure to decrease n_added. */
1370 if (i >= cpuc->n_events - cpuc->n_added)
1371 --cpuc->n_added;
1372
1373 if (x86_pmu.put_event_constraints)
1374 x86_pmu.put_event_constraints(cpuc, event);
1375
1376 /* Delete the array entry. */
Peter Zijlstrab371b592015-05-21 10:57:13 +02001377 while (++i < cpuc->n_events) {
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001378 cpuc->event_list[i-1] = cpuc->event_list[i];
Peter Zijlstrab371b592015-05-21 10:57:13 +02001379 cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
1380 }
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001381 --cpuc->n_events;
1382
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001383 perf_event_update_userpage(event);
Ingo Molnar241771e2008-12-03 10:39:53 +01001384}
1385
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001386int x86_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02001387{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001388 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001389 struct cpu_hw_events *cpuc;
1390 struct perf_event *event;
Vince Weaver11d15782009-07-08 17:46:14 -04001391 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001392 u64 val;
1393
Christoph Lameter89cbc762014-08-17 12:30:40 -05001394 cpuc = this_cpu_ptr(&cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001395
Don Zickus2bce5da2011-04-27 06:32:33 -04001396 /*
1397 * Some chipsets need to unmask the LVTPC in a particular spot
1398 * inside the nmi handler. As a result, the unmasking was pushed
1399 * into all the nmi handlers.
1400 *
1401 * This generic handler doesn't seem to have any issues with where the
1402 * unmasking occurs, so it was left at the top.
1403 */
1404 apic_write(APIC_LVTPC, APIC_DM_NMI);
1405
Robert Richter948b1bb2010-03-29 18:36:50 +02001406 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Robert Richter63e6be62010-09-15 18:20:34 +02001407 if (!test_bit(idx, cpuc->active_mask)) {
1408 /*
1409 * Though we deactivated the counter, some CPUs
1410 * might still deliver spurious interrupts that were
1411 * already in flight. Catch them:
1412 */
1413 if (__test_and_clear_bit(idx, cpuc->running))
1414 handled++;
Robert Richtera29aa8a2009-04-29 12:47:21 +02001415 continue;
Robert Richter63e6be62010-09-15 18:20:34 +02001416 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001417
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001418 event = cpuc->events[idx];
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001419
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001420 val = x86_perf_event_update(event);
Robert Richter948b1bb2010-03-29 18:36:50 +02001421 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001422 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001423
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001424 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001425 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001426 */
Robert Richter4177c422010-09-02 15:07:48 -04001427 handled++;
Robert Richterfd0d0002012-04-02 20:19:08 +02001428 perf_sample_data_init(&data, 0, event->hw.last_period);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001429
Peter Zijlstra07088ed2010-03-02 20:16:01 +01001430 if (!x86_perf_event_set_period(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001431 continue;
1432
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001433 if (perf_event_overflow(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001434 x86_pmu_stop(event, 0);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001435 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001436
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001437 if (handled)
1438 inc_irq_stat(apic_perf_irqs);
1439
Robert Richtera29aa8a2009-04-29 12:47:21 +02001440 return handled;
1441}
Robert Richter39d81ea2009-04-29 12:47:05 +02001442
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001443void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001444{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001445 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001446 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001447
Ingo Molnar241771e2008-12-03 10:39:53 +01001448 /*
Yong Wangc323d952009-05-29 13:28:35 +08001449 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001450 */
Yong Wangc323d952009-05-29 13:28:35 +08001451 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar241771e2008-12-03 10:39:53 +01001452}
1453
Masami Hiramatsu93266382014-04-17 17:18:14 +09001454static int
Don Zickus9c48f1c2011-09-30 15:06:21 -04001455perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001456{
Dave Hansen14c63f12013-06-21 08:51:36 -07001457 u64 start_clock;
1458 u64 finish_clock;
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001459 int ret;
Dave Hansen14c63f12013-06-21 08:51:36 -07001460
Alexander Shishkin1b7b9382015-06-09 13:03:26 +03001461 /*
1462 * All PMUs/events that share this PMI handler should make sure to
1463 * increment active_events for their events.
1464 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001465 if (!atomic_read(&active_events))
Don Zickus9c48f1c2011-09-30 15:06:21 -04001466 return NMI_DONE;
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001467
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001468 start_clock = sched_clock();
Dave Hansen14c63f12013-06-21 08:51:36 -07001469 ret = x86_pmu.handle_irq(regs);
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001470 finish_clock = sched_clock();
Dave Hansen14c63f12013-06-21 08:51:36 -07001471
1472 perf_sample_event_took(finish_clock - start_clock);
1473
1474 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001475}
Masami Hiramatsu93266382014-04-17 17:18:14 +09001476NOKPROBE_SYMBOL(perf_event_nmi_handler);
Ingo Molnar241771e2008-12-03 10:39:53 +01001477
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001478struct event_constraint emptyconstraint;
1479struct event_constraint unconstrained;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301480
Paul Gortmaker148f9bb2013-06-18 18:23:59 -04001481static int
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001482x86_pmu_notifier(struct notifier_block *self, unsigned long action, void *hcpu)
1483{
1484 unsigned int cpu = (long)hcpu;
Peter Zijlstra7fdba1c2011-07-22 13:41:54 +02001485 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
Stephane Eranian90413462014-11-17 20:06:54 +01001486 int i, ret = NOTIFY_OK;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001487
1488 switch (action & ~CPU_TASKS_FROZEN) {
1489 case CPU_UP_PREPARE:
Stephane Eranian90413462014-11-17 20:06:54 +01001490 for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
1491 cpuc->kfree_on_online[i] = NULL;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001492 if (x86_pmu.cpu_prepare)
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001493 ret = x86_pmu.cpu_prepare(cpu);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001494 break;
1495
1496 case CPU_STARTING:
1497 if (x86_pmu.cpu_starting)
1498 x86_pmu.cpu_starting(cpu);
1499 break;
1500
Peter Zijlstra7fdba1c2011-07-22 13:41:54 +02001501 case CPU_ONLINE:
Stephane Eranian90413462014-11-17 20:06:54 +01001502 for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
1503 kfree(cpuc->kfree_on_online[i]);
1504 cpuc->kfree_on_online[i] = NULL;
1505 }
Peter Zijlstra7fdba1c2011-07-22 13:41:54 +02001506 break;
1507
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001508 case CPU_DYING:
1509 if (x86_pmu.cpu_dying)
1510 x86_pmu.cpu_dying(cpu);
1511 break;
1512
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001513 case CPU_UP_CANCELED:
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001514 case CPU_DEAD:
1515 if (x86_pmu.cpu_dead)
1516 x86_pmu.cpu_dead(cpu);
1517 break;
1518
1519 default:
1520 break;
1521 }
1522
Peter Zijlstrab38b24e2010-03-23 19:31:15 +01001523 return ret;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001524}
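/*
 * Hotplug ordering, for reference (editorial note): when a CPU comes up the
 * notifier sees CPU_UP_PREPARE (on a control CPU, may allocate/sleep), then
 * CPU_STARTING (on the new CPU, interrupts off), then CPU_ONLINE; on the way
 * down it sees CPU_DYING followed by CPU_DEAD.  kfree_on_online[] exists
 * because cpu_starting() runs with interrupts disabled and therefore cannot
 * kfree() a structure it no longer needs; it parks the pointer there and the
 * CPU_ONLINE leg above frees it.
 */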
1525
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001526static void __init pmu_check_apic(void)
1527{
Borislav Petkov93984fb2016-04-04 22:25:00 +02001528 if (boot_cpu_has(X86_FEATURE_APIC))
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001529 return;
1530
1531 x86_pmu.apic = 0;
1532 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1533 pr_info("no hardware sampling interrupt available.\n");
Vince Weaverc184c982014-05-16 17:18:07 -04001534
1535 /*
1536 * If we have a PMU initialized but no APIC
1537 * interrupts, we cannot sample hardware
1538 * events (user-space has to fall back and
1539 * sample via an hrtimer based software event):
1540 */
1541 pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1542
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001543}
1544
Jiri Olsa641cc932012-03-15 20:09:14 +01001545static struct attribute_group x86_pmu_format_group = {
1546 .name = "format",
1547 .attrs = NULL,
1548};
1549
Jiri Olsa8300daa2012-10-10 14:53:12 +02001550/*
1551 * Remove all undefined events (x86_pmu.event_map(id) == 0)
1552 * from the events_attr attributes.
1553 */
1554static void __init filter_events(struct attribute **attrs)
1555{
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001556 struct device_attribute *d;
1557 struct perf_pmu_events_attr *pmu_attr;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001558 int offset = 0;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001559 int i, j;
1560
1561 for (i = 0; attrs[i]; i++) {
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001562 d = (struct device_attribute *)attrs[i];
1563 pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
1564 /* str trumps id */
1565 if (pmu_attr->event_str)
1566 continue;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001567 if (x86_pmu.event_map(i + offset))
Jiri Olsa8300daa2012-10-10 14:53:12 +02001568 continue;
1569
1570 for (j = i; attrs[j]; j++)
1571 attrs[j] = attrs[j + 1];
1572
1573 /* Check the shifted attr. */
1574 i--;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001575
1576 /*
1577 * event_map() is index based, the attrs array is organized
1578 * by increasing event index. If we shift the events, then
1579 * we need to compensate for the event_map(), otherwise
1580 * we are looking up the wrong event in the map
1581 */
1582 offset++;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001583 }
1584}
1585
Andi Kleen1a6461b2013-01-24 16:10:25 +01001586/* Merge two pointer arrays */
Andi Kleen47732d82015-06-29 14:22:13 -07001587__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
Andi Kleen1a6461b2013-01-24 16:10:25 +01001588{
1589 struct attribute **new;
1590 int j, i;
1591
1592 for (j = 0; a[j]; j++)
1593 ;
1594 for (i = 0; b[i]; i++)
1595 j++;
1596 j++;
1597
1598 new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
1599 if (!new)
1600 return NULL;
1601
1602 j = 0;
1603 for (i = 0; a[i]; i++)
1604 new[j++] = a[i];
1605 for (i = 0; b[i]; i++)
1606 new[j++] = b[i];
1607 new[j] = NULL;
1608
1609 return new;
1610}
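/*
 * Example use (mirrors init_hw_perf_events() below): append a model specific
 * attribute list to the generic events, keeping the result NULL terminated:
 *
 *	struct attribute **tmp;
 *
 *	tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
 *	if (!WARN_ON(!tmp))
 *		x86_pmu_events_group.attrs = tmp;
 *
 * Note the result is kmalloc()ed and never freed; callers only use it for
 * long-lived sysfs attribute groups.
 */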
1611
Huang Ruic7ab62b2016-03-09 13:45:06 +08001612ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
Jiri Olsaa4747392012-10-10 14:53:11 +02001613{
1614 struct perf_pmu_events_attr *pmu_attr = \
1615 container_of(attr, struct perf_pmu_events_attr, attr);
Jiri Olsaa4747392012-10-10 14:53:11 +02001616 u64 config = x86_pmu.event_map(pmu_attr->id);
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001617
1618 /* string trumps id */
1619 if (pmu_attr->event_str)
1620 return sprintf(page, "%s", pmu_attr->event_str);
1621
Jiri Olsaa4747392012-10-10 14:53:11 +02001622 return x86_pmu.events_sysfs_show(page, config);
1623}
Huang Ruic7ab62b2016-03-09 13:45:06 +08001624EXPORT_SYMBOL_GPL(events_sysfs_show);
Jiri Olsaa4747392012-10-10 14:53:11 +02001625
Jiri Olsaa4747392012-10-10 14:53:11 +02001626EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1627EVENT_ATTR(instructions, INSTRUCTIONS );
1628EVENT_ATTR(cache-references, CACHE_REFERENCES );
1629EVENT_ATTR(cache-misses, CACHE_MISSES );
1630EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
1631EVENT_ATTR(branch-misses, BRANCH_MISSES );
1632EVENT_ATTR(bus-cycles, BUS_CYCLES );
1633EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
1634EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
1635EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
1636
1637static struct attribute *empty_attrs;
1638
Peter Huewe95d18aa2012-10-29 21:48:17 +01001639static struct attribute *events_attr[] = {
Jiri Olsaa4747392012-10-10 14:53:11 +02001640 EVENT_PTR(CPU_CYCLES),
1641 EVENT_PTR(INSTRUCTIONS),
1642 EVENT_PTR(CACHE_REFERENCES),
1643 EVENT_PTR(CACHE_MISSES),
1644 EVENT_PTR(BRANCH_INSTRUCTIONS),
1645 EVENT_PTR(BRANCH_MISSES),
1646 EVENT_PTR(BUS_CYCLES),
1647 EVENT_PTR(STALLED_CYCLES_FRONTEND),
1648 EVENT_PTR(STALLED_CYCLES_BACKEND),
1649 EVENT_PTR(REF_CPU_CYCLES),
1650 NULL,
1651};
1652
1653static struct attribute_group x86_pmu_events_group = {
1654 .name = "events",
1655 .attrs = events_attr,
1656};
1657
Jiri Olsa0bf79d42012-10-10 14:53:14 +02001658ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
Jiri Olsa43c032f2012-10-10 14:53:13 +02001659{
Jiri Olsa43c032f2012-10-10 14:53:13 +02001660 u64 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
1661 u64 cmask = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
1662 bool edge = (config & ARCH_PERFMON_EVENTSEL_EDGE);
1663 bool pc = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
1664 bool any = (config & ARCH_PERFMON_EVENTSEL_ANY);
1665 bool inv = (config & ARCH_PERFMON_EVENTSEL_INV);
1666 ssize_t ret;
1667
1668 /*
1669 * We have a whole page to spend and just a little data
1670 * to write, so we can safely use sprintf.
1671 */
1672 ret = sprintf(page, "event=0x%02llx", event);
1673
1674 if (umask)
1675 ret += sprintf(page + ret, ",umask=0x%02llx", umask);
1676
1677 if (edge)
1678 ret += sprintf(page + ret, ",edge");
1679
1680 if (pc)
1681 ret += sprintf(page + ret, ",pc");
1682
1683 if (any)
1684 ret += sprintf(page + ret, ",any");
1685
1686 if (inv)
1687 ret += sprintf(page + ret, ",inv");
1688
1689 if (cmask)
1690 ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
1691
1692 ret += sprintf(page + ret, "\n");
1693
1694 return ret;
1695}
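/*
 * Example output (illustrative values): for event 0x0e and a config of
 * 0x0180010e (umask 0x01, inv set, cmask 0x01) this renders as
 *
 *	event=0x0e,umask=0x01,inv,cmask=0x01
 *
 * i.e. the same "cpu/.../" term syntax that the perf tool accepts on the
 * command line.
 */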
1696
Yinghai Ludda99112011-01-21 15:30:01 -08001697static int __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301698{
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001699 struct x86_pmu_quirk *quirk;
Robert Richter72eae042009-04-29 12:47:10 +02001700 int err;
1701
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001702 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001703
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301704 switch (boot_cpu_data.x86_vendor) {
1705 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001706 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301707 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301708 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001709 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301710 break;
Robert Richter41389602009-04-29 12:47:00 +02001711 default:
Ingo Molnar8a3da6c72013-09-28 15:48:48 +02001712 err = -ENOTSUPP;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301713 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001714 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001715 pr_cont("no PMU driver, software events only.\n");
Peter Zijlstra004417a2010-11-25 18:38:29 +01001716 return 0;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001717 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301718
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001719 pmu_check_apic();
1720
Don Zickus33c6d6a2010-11-22 16:55:23 -05001721 /* sanity check that the hardware exists or is emulated */
Peter Zijlstra44072042010-12-08 15:56:23 +01001722 if (!check_hw_exists())
Peter Zijlstra004417a2010-11-25 18:38:29 +01001723 return 0;
Don Zickus33c6d6a2010-11-22 16:55:23 -05001724
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001725 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001726
Peter Zijlstrae97df762014-02-05 20:48:51 +01001727 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1728
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001729 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1730 quirk->func();
Peter Zijlstra3c447802010-03-04 21:49:01 +01001731
Robert Richtera1eac7a2012-06-20 20:46:34 +02001732 if (!x86_pmu.intel_ctrl)
1733 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001734
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001735 perf_events_lapic_init();
Don Zickus9c48f1c2011-09-30 15:06:21 -04001736 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001737
Peter Zijlstra63b14642010-01-22 16:32:17 +01001738 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001739 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001740 0, x86_pmu.num_counters, 0, 0);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001741
Jiri Olsa641cc932012-03-15 20:09:14 +01001742 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01001743
Stephane Eranianf20093e2013-01-24 16:10:32 +01001744 if (x86_pmu.event_attrs)
1745 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1746
Jiri Olsaa4747392012-10-10 14:53:11 +02001747 if (!x86_pmu.events_sysfs_show)
1748 x86_pmu_events_group.attrs = &empty_attrs;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001749 else
1750 filter_events(x86_pmu_events_group.attrs);
Jiri Olsaa4747392012-10-10 14:53:11 +02001751
Andi Kleen1a6461b2013-01-24 16:10:25 +01001752 if (x86_pmu.cpu_events) {
1753 struct attribute **tmp;
1754
1755 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1756 if (!WARN_ON(!tmp))
1757 x86_pmu_events_group.attrs = tmp;
1758 }
1759
Ingo Molnar57c0c152009-09-21 12:20:38 +02001760 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001761 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1762 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1763 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001764 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001765 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001766 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001767
Peter Zijlstra2e80a822010-11-17 23:17:36 +01001768 perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001769 perf_cpu_notifier(x86_pmu_notifier);
Peter Zijlstra004417a2010-11-25 18:38:29 +01001770
1771 return 0;
Ingo Molnar241771e2008-12-03 10:39:53 +01001772}
Peter Zijlstra004417a2010-11-25 18:38:29 +01001773early_initcall(init_hw_perf_events);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001774
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001775static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001776{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001777 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001778}
1779
Lin Ming4d1c52b2010-04-23 13:56:12 +08001780/*
1781 * Start group events scheduling transaction
1782 * Set the flag to make pmu::enable() not perform the
1783 * schedulability test; it will be performed at commit time
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001784 *
1785 * We only support PERF_PMU_TXN_ADD transactions. Save the
1786 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
1787 * transactions.
Lin Ming4d1c52b2010-04-23 13:56:12 +08001788 */
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001789static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001790{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001791 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1792
1793 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */
1794
1795 cpuc->txn_flags = txn_flags;
1796 if (txn_flags & ~PERF_PMU_TXN_ADD)
1797 return;
1798
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001799 perf_pmu_disable(pmu);
Tejun Heo0a3aee02010-12-18 16:28:55 +01001800 __this_cpu_write(cpu_hw_events.n_txn, 0);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001801}
1802
1803/*
1804 * Stop group events scheduling transaction
1805 * Clear the flag and pmu::enable() will perform the
1806 * schedulability test.
1807 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001808static void x86_pmu_cancel_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001809{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001810 unsigned int txn_flags;
1811 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1812
1813 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1814
1815 txn_flags = cpuc->txn_flags;
1816 cpuc->txn_flags = 0;
1817 if (txn_flags & ~PERF_PMU_TXN_ADD)
1818 return;
1819
Stephane Eranian90151c352010-05-25 16:23:10 +02001820 /*
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001821 * Truncate collected array by the number of events added in this
1822 * transaction. See x86_pmu_add() and x86_pmu_*_txn().
Stephane Eranian90151c352010-05-25 16:23:10 +02001823 */
Tejun Heo0a3aee02010-12-18 16:28:55 +01001824 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1825 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001826 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001827}
1828
1829/*
1830 * Commit group events scheduling transaction
1831 * Perform the group schedulability test as a whole
1832 * Return 0 if success
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001833 *
1834 * Does not cancel the transaction on failure; expects the caller to do this.
Lin Ming4d1c52b2010-04-23 13:56:12 +08001835 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001836static int x86_pmu_commit_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001837{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001838 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001839 int assign[X86_PMC_IDX_MAX];
1840 int n, ret;
1841
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001842 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1843
1844 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
1845 cpuc->txn_flags = 0;
1846 return 0;
1847 }
1848
Lin Ming4d1c52b2010-04-23 13:56:12 +08001849 n = cpuc->n_events;
1850
1851 if (!x86_pmu_initialized())
1852 return -EAGAIN;
1853
1854 ret = x86_pmu.schedule_events(cpuc, n, assign);
1855 if (ret)
1856 return ret;
1857
1858 /*
1859 * Copy the new assignment; now we know it is possible. It
1860 * will be used by hw_perf_enable().
1861 */
1862 memcpy(cpuc->assign, assign, n*sizeof(int));
1863
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001864 cpuc->txn_flags = 0;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001865 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001866 return 0;
1867}
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001868/*
1869 * a fake_cpuc is used to validate event groups. Due to
1870 * the extra reg logic, we need to also allocate a fake
1871 * per_core and per_cpu structure. Otherwise, group events
1872 * using extra reg may conflict without the kernel being
1873 * able to catch this when the last event gets added to
1874 * the group.
1875 */
1876static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1877{
1878 kfree(cpuc->shared_regs);
1879 kfree(cpuc);
1880}
1881
1882static struct cpu_hw_events *allocate_fake_cpuc(void)
1883{
1884 struct cpu_hw_events *cpuc;
1885 int cpu = raw_smp_processor_id();
1886
1887 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1888 if (!cpuc)
1889 return ERR_PTR(-ENOMEM);
1890
1891 /* only needed if we have extra_regs */
1892 if (x86_pmu.extra_regs) {
1893 cpuc->shared_regs = allocate_shared_regs(cpu);
1894 if (!cpuc->shared_regs)
1895 goto error;
1896 }
Peter Zijlstrab430f7c2012-06-05 15:30:31 +02001897 cpuc->is_fake = 1;
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001898 return cpuc;
1899error:
1900 free_fake_cpuc(cpuc);
1901 return ERR_PTR(-ENOMEM);
1902}
Lin Ming4d1c52b2010-04-23 13:56:12 +08001903
Stephane Eranian1da53e02010-01-18 10:58:01 +02001904/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001905 * validate that we can schedule this event
1906 */
1907static int validate_event(struct perf_event *event)
1908{
1909 struct cpu_hw_events *fake_cpuc;
1910 struct event_constraint *c;
1911 int ret = 0;
1912
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001913 fake_cpuc = allocate_fake_cpuc();
1914 if (IS_ERR(fake_cpuc))
1915 return PTR_ERR(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01001916
Stephane Eranian79cba822014-11-17 20:06:56 +01001917 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001918
1919 if (!c || !c->weight)
Peter Zijlstraaa2bc1a2011-11-09 17:56:37 +01001920 ret = -EINVAL;
Peter Zijlstraca037702010-03-02 19:52:12 +01001921
1922 if (x86_pmu.put_event_constraints)
1923 x86_pmu.put_event_constraints(fake_cpuc, event);
1924
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001925 free_fake_cpuc(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01001926
1927 return ret;
1928}
1929
1930/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02001931 * validate a single event group
1932 *
1933 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01001934 * - check events are compatible with each other
1935 * - events do not compete for the same counter
1936 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02001937 *
1938 * validation ensures the group can be loaded onto the
1939 * PMU if it was the only group available.
1940 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001941static int validate_group(struct perf_event *event)
1942{
Stephane Eranian1da53e02010-01-18 10:58:01 +02001943 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01001944 struct cpu_hw_events *fake_cpuc;
Peter Zijlstraaa2bc1a2011-11-09 17:56:37 +01001945 int ret = -EINVAL, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001946
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001947 fake_cpuc = allocate_fake_cpuc();
1948 if (IS_ERR(fake_cpuc))
1949 return PTR_ERR(fake_cpuc);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001950 /*
1951 * The event is not yet connected with its
1952 * siblings; therefore we must first collect the
1953 * existing siblings, then add the new event
1954 * before we can simulate the scheduling.
1955 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01001956 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001957 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001958 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001959
Peter Zijlstra502568d2010-01-22 14:35:46 +01001960 fake_cpuc->n_events = n;
1961 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02001962 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001963 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001964
Peter Zijlstra502568d2010-01-22 14:35:46 +01001965 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001966
Cyrill Gorcunova0727382010-03-11 19:54:39 +03001967 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001968
Peter Zijlstra502568d2010-01-22 14:35:46 +01001969out:
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001970 free_fake_cpuc(fake_cpuc);
Peter Zijlstra502568d2010-01-22 14:35:46 +01001971 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001972}
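/*
 * Editorial note: this is what rejects an impossible group at
 * perf_event_open() time.  E.g. opening a group with more hardware events
 * than the PMU has counters fails here with -EINVAL (via x86_pmu_event_init()
 * below) instead of silently never scheduling, because schedule_events() is
 * run against a throw-away fake_cpuc holding the leader, its siblings and
 * the new event.
 */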
1973
Yinghai Ludda99112011-01-21 15:30:01 -08001974static int x86_pmu_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01001975{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001976 struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01001977 int err;
1978
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02001979 switch (event->attr.type) {
1980 case PERF_TYPE_RAW:
1981 case PERF_TYPE_HARDWARE:
1982 case PERF_TYPE_HW_CACHE:
1983 break;
1984
1985 default:
1986 return -ENOENT;
1987 }
1988
1989 err = __x86_pmu_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001990 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02001991 /*
1992 * we temporarily connect the event to its pmu
1993 * such that validate_group() can classify
1994 * it as an x86 event using is_x86_event()
1995 */
1996 tmp = event->pmu;
1997 event->pmu = &pmu;
1998
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02001999 if (event->group_leader != event)
2000 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01002001 else
2002 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02002003
2004 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002005 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002006 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002007 if (event->destroy)
2008 event->destroy(event);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002009 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01002010
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002011 if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
2012 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
2013
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002014 return err;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002015}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002016
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002017static void refresh_pce(void *ignored)
2018{
2019 if (current->mm)
2020 load_mm_cr4(current->mm);
2021}
2022
2023static void x86_pmu_event_mapped(struct perf_event *event)
2024{
2025 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2026 return;
2027
2028 if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
2029 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2030}
2031
2032static void x86_pmu_event_unmapped(struct perf_event *event)
2033{
2034 if (!current->mm)
2035 return;
2036
2037 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2038 return;
2039
2040 if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
2041 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2042}
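/*
 * Editorial note: _mapped()/_unmapped() keep a per-mm count of live mmap()ed
 * events that are allowed to use RDPMC.  The first such mapping in an mm
 * turns CR4.PCE on (refresh_pce() -> load_mm_cr4()) on every CPU currently
 * running that mm, and the last unmap turns it back off, so userspace RDPMC
 * only works while a perf ring-buffer mapping exists - unless rdpmc is forced
 * to "always available" mode via sysfs (see set_attr_rdpmc() below).
 */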
2043
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002044static int x86_pmu_event_idx(struct perf_event *event)
2045{
2046 int idx = event->hw.idx;
2047
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002048 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
Peter Zijlstrac7206202012-03-22 17:26:36 +01002049 return 0;
2050
Robert Richter15c7ad52012-06-20 20:46:33 +02002051 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
2052 idx -= INTEL_PMC_IDX_FIXED;
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002053 idx |= 1 << 30;
2054 }
2055
2056 return idx + 1;
2057}
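/*
 * Userspace counterpart (sketch, per the perf_event_mmap_page protocol): the
 * value returned here is published as userpg->index, and a self-monitoring
 * task reads the counter roughly as:
 *
 *	idx = userpg->index;
 *	if (idx)
 *		count = userpg->offset + rdpmc(idx - 1);
 *
 * Bit 30 in the rdpmc argument selects the fixed-counter bank, which is why
 * fixed counters get that bit set above.
 */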
2058
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002059static ssize_t get_attr_rdpmc(struct device *cdev,
2060 struct device_attribute *attr,
2061 char *buf)
2062{
2063 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
2064}
2065
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002066static ssize_t set_attr_rdpmc(struct device *cdev,
2067 struct device_attribute *attr,
2068 const char *buf, size_t count)
2069{
Shuah Khane2b297f2012-06-10 21:13:41 -06002070 unsigned long val;
2071 ssize_t ret;
2072
2073 ret = kstrtoul(buf, 0, &val);
2074 if (ret)
2075 return ret;
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002076
Andy Lutomirskia6673422014-10-24 15:58:13 -07002077 if (val > 2)
2078 return -EINVAL;
2079
Peter Zijlstrae97df762014-02-05 20:48:51 +01002080 if (x86_pmu.attr_rdpmc_broken)
2081 return -ENOTSUPP;
2082
Andy Lutomirskia6673422014-10-24 15:58:13 -07002083 if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
2084 /*
2085 * Changing into or out of always available, aka
2086 * perf-event-bypassing mode. This path is extremely slow,
2087 * but only root can trigger it, so it's okay.
2088 */
2089 if (val == 2)
2090 static_key_slow_inc(&rdpmc_always_available);
2091 else
2092 static_key_slow_dec(&rdpmc_always_available);
2093 on_each_cpu(refresh_pce, NULL, 1);
2094 }
2095
2096 x86_pmu.attr_rdpmc = val;
2097
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002098 return count;
2099}
2100
2101static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
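/*
 * The knob above is exposed as /sys/bus/event_source/devices/cpu/rdpmc
 * (path assumes the "cpu" PMU registration in init_hw_perf_events()):
 *
 *	0 - RDPMC disabled for userspace
 *	1 - RDPMC allowed for tasks with an active perf mapping (default)
 *	2 - RDPMC always allowed, e.g.:  echo 2 > .../cpu/rdpmc  (root only)
 */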
2102
2103static struct attribute *x86_pmu_attrs[] = {
2104 &dev_attr_rdpmc.attr,
2105 NULL,
2106};
2107
2108static struct attribute_group x86_pmu_attr_group = {
2109 .attrs = x86_pmu_attrs,
2110};
2111
2112static const struct attribute_group *x86_pmu_attr_groups[] = {
2113 &x86_pmu_attr_group,
Jiri Olsa641cc932012-03-15 20:09:14 +01002114 &x86_pmu_format_group,
Jiri Olsaa4747392012-10-10 14:53:11 +02002115 &x86_pmu_events_group,
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002116 NULL,
2117};
2118
Yan, Zhengba532502014-11-04 21:55:58 -05002119static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
Stephane Eraniand010b332012-02-09 23:21:00 +01002120{
Yan, Zhengba532502014-11-04 21:55:58 -05002121 if (x86_pmu.sched_task)
2122 x86_pmu.sched_task(ctx, sched_in);
Stephane Eraniand010b332012-02-09 23:21:00 +01002123}
2124
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002125void perf_check_microcode(void)
2126{
2127 if (x86_pmu.check_microcode)
2128 x86_pmu.check_microcode();
2129}
2130EXPORT_SYMBOL_GPL(perf_check_microcode);
2131
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002132static struct pmu pmu = {
Stephane Eraniand010b332012-02-09 23:21:00 +01002133 .pmu_enable = x86_pmu_enable,
2134 .pmu_disable = x86_pmu_disable,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002135
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002136 .attr_groups = x86_pmu_attr_groups,
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002137
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002138 .event_init = x86_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002139
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002140 .event_mapped = x86_pmu_event_mapped,
2141 .event_unmapped = x86_pmu_event_unmapped,
2142
Stephane Eraniand010b332012-02-09 23:21:00 +01002143 .add = x86_pmu_add,
2144 .del = x86_pmu_del,
2145 .start = x86_pmu_start,
2146 .stop = x86_pmu_stop,
2147 .read = x86_pmu_read,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002148
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002149 .start_txn = x86_pmu_start_txn,
2150 .cancel_txn = x86_pmu_cancel_txn,
2151 .commit_txn = x86_pmu_commit_txn,
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002152
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002153 .event_idx = x86_pmu_event_idx,
Yan, Zhengba532502014-11-04 21:55:58 -05002154 .sched_task = x86_pmu_sched_task,
Yan, Zhenge18bf522014-11-04 21:56:03 -05002155 .task_ctx_size = sizeof(struct x86_perf_task_context),
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002156};
2157
Andy Lutomirskic1317ec2014-10-24 15:58:11 -07002158void arch_perf_update_userpage(struct perf_event *event,
2159 struct perf_event_mmap_page *userpg, u64 now)
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002160{
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002161 struct cyc2ns_data *data;
2162
Peter Zijlstrafa731582013-09-19 10:16:42 +02002163 userpg->cap_user_time = 0;
2164 userpg->cap_user_time_zero = 0;
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002165 userpg->cap_user_rdpmc =
2166 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
Peter Zijlstrac7206202012-03-22 17:26:36 +01002167 userpg->pmc_width = x86_pmu.cntval_bits;
2168
Peter Zijlstra35af99e2013-11-28 19:38:42 +01002169 if (!sched_clock_stable())
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002170 return;
2171
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002172 data = cyc2ns_read_begin();
2173
Peter Zijlstra34f43922015-02-20 14:05:38 +01002174 /*
2175 * Internal timekeeping for enabled/running/stopped times
2176 * is always in the local_clock domain.
2177 */
Peter Zijlstrafa731582013-09-19 10:16:42 +02002178 userpg->cap_user_time = 1;
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002179 userpg->time_mult = data->cyc2ns_mul;
2180 userpg->time_shift = data->cyc2ns_shift;
2181 userpg->time_offset = data->cyc2ns_offset - now;
Adrian Hunterc73deb62013-06-28 16:22:18 +03002182
Peter Zijlstra34f43922015-02-20 14:05:38 +01002183 /*
2184 * cap_user_time_zero doesn't make sense when we're using a different
2185 * time base for the records.
2186 */
Alexander Shishkinf454bfd2016-04-14 14:59:49 +03002187 if (!event->attr.use_clockid) {
Peter Zijlstra34f43922015-02-20 14:05:38 +01002188 userpg->cap_user_time_zero = 1;
2189 userpg->time_zero = data->cyc2ns_offset;
2190 }
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002191
2192 cyc2ns_read_end(data);
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002193}
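/*
 * Userspace side of the fields filled in above (sketch, following the
 * perf_event_mmap_page documentation): TSC cycles are converted to ns as
 *
 *	quot  = cyc >> time_shift;
 *	rem   = cyc & (((u64)1 << time_shift) - 1);
 *	delta = time_offset + quot * time_mult +
 *		((rem * time_mult) >> time_shift);
 *
 * which matches the cyc2ns_* values exported from cyc2ns_read_begin() here.
 */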
2194
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002195/*
2196 * callchain support
2197 */
2198
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002199static int backtrace_stack(void *data, char *name)
2200{
Ingo Molnar038e8362009-06-15 09:57:59 +02002201 return 0;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002202}
2203
Alexei Starovoitov568b3292016-02-17 19:58:57 -08002204static int backtrace_address(void *data, unsigned long addr, int reliable)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002205{
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002206 struct perf_callchain_entry_ctx *entry = data;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002207
Alexei Starovoitov568b3292016-02-17 19:58:57 -08002208 return perf_callchain_store(entry, addr);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002209}
2210
2211static const struct stacktrace_ops backtrace_ops = {
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002212 .stack = backtrace_stack,
2213 .address = backtrace_address,
Frederic Weisbecker06d65bd2009-12-17 05:40:34 +01002214 .walk_stack = print_context_stack_bp,
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002215};
2216
Frederic Weisbecker56962b4442010-06-30 23:03:51 +02002217void
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002218perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002219{
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002220 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2221 /* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02002222 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002223 }
2224
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02002225 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002226
Namhyung Kime8e999cf2011-03-18 11:40:06 +09002227 dump_trace(NULL, regs, NULL, 0, &backtrace_ops, entry);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002228}
2229
Arun Sharmabc6ca7b2012-04-20 15:41:35 -07002230static inline int
2231valid_user_frame(const void __user *fp, unsigned long size)
2232{
2233 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
2234}
2235
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002236static unsigned long get_segment_base(unsigned int segment)
2237{
2238 struct desc_struct *desc;
2239 int idx = segment >> 3;
2240
2241 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
Andy Lutomirskia5b9e5a2015-07-30 14:31:34 -07002242#ifdef CONFIG_MODIFY_LDT_SYSCALL
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002243 struct ldt_struct *ldt;
2244
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002245 if (idx > LDT_ENTRIES)
2246 return 0;
2247
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002248 /* IRQs are off, so this synchronizes with smp_store_release */
2249 ldt = lockless_dereference(current->active_mm->context.ldt);
2250 if (!ldt || idx > ldt->size)
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002251 return 0;
2252
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002253 desc = &ldt->entries[idx];
Andy Lutomirskia5b9e5a2015-07-30 14:31:34 -07002254#else
2255 return 0;
2256#endif
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002257 } else {
2258 if (idx > GDT_ENTRIES)
2259 return 0;
2260
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002261 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002262 }
2263
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002264 return get_desc_base(desc);
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002265}
2266
Brian Gerst10ed3492015-06-22 07:55:17 -04002267#ifdef CONFIG_IA32_EMULATION
H. Peter Anvind1a797f2012-02-19 10:06:34 -08002268
2269#include <asm/compat.h>
2270
Torok Edwin257ef9d2010-03-17 12:07:16 +02002271static inline int
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002272perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002273{
Torok Edwin257ef9d2010-03-17 12:07:16 +02002274 /* 32-bit process in 64-bit kernel. */
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002275 unsigned long ss_base, cs_base;
Torok Edwin257ef9d2010-03-17 12:07:16 +02002276 struct stack_frame_ia32 frame;
2277 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002278
Torok Edwin257ef9d2010-03-17 12:07:16 +02002279 if (!test_thread_flag(TIF_IA32))
2280 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002281
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002282 cs_base = get_segment_base(regs->cs);
2283 ss_base = get_segment_base(regs->ss);
2284
2285 fp = compat_ptr(ss_base + regs->bp);
Andi Kleen75925e12015-10-22 15:07:21 -07002286 pagefault_disable();
Arnaldo Carvalho de Melo3b1fff02016-05-10 18:08:32 -03002287 while (entry->nr < entry->max_stack) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02002288 unsigned long bytes;
2289 frame.next_frame = 0;
2290 frame.return_address = 0;
2291
Andi Kleen75925e12015-10-22 15:07:21 -07002292 if (!access_ok(VERIFY_READ, fp, 8))
2293 break;
2294
2295 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
2296 if (bytes != 0)
2297 break;
2298 bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
Peter Zijlstra0a196842013-10-30 21:16:22 +01002299 if (bytes != 0)
Torok Edwin257ef9d2010-03-17 12:07:16 +02002300 break;
2301
Arun Sharmabc6ca7b2012-04-20 15:41:35 -07002302 if (!valid_user_frame(fp, sizeof(frame)))
2303 break;
2304
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002305 perf_callchain_store(entry, cs_base + frame.return_address);
2306 fp = compat_ptr(ss_base + frame.next_frame);
Torok Edwin257ef9d2010-03-17 12:07:16 +02002307 }
Andi Kleen75925e12015-10-22 15:07:21 -07002308 pagefault_enable();
Torok Edwin257ef9d2010-03-17 12:07:16 +02002309 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002310}
Torok Edwin257ef9d2010-03-17 12:07:16 +02002311#else
2312static inline int
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002313perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
Torok Edwin257ef9d2010-03-17 12:07:16 +02002314{
2315 return 0;
2316}
2317#endif
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002318
Frederic Weisbecker56962b4442010-06-30 23:03:51 +02002319void
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002320perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002321{
2322 struct stack_frame frame;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002323 const unsigned long __user *fp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002324
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002325 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2326 /* TODO: We don't support guest OS callchains yet */
Peter Zijlstraed805262010-08-20 14:30:41 +02002327 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002328 }
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002329
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002330 /*
2331 * We don't know what to do with VM86 stacks... ignore them for now.
2332 */
2333 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
2334 return;
2335
Josh Poimboeuffc188222016-07-01 23:02:05 -05002336 fp = (unsigned long __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002337
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02002338 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002339
Andrey Vagin20afc602011-08-30 12:32:36 +04002340 if (!current->mm)
2341 return;
2342
Torok Edwin257ef9d2010-03-17 12:07:16 +02002343 if (perf_callchain_user32(regs, entry))
2344 return;
2345
Andi Kleen75925e12015-10-22 15:07:21 -07002346 pagefault_disable();
Arnaldo Carvalho de Melo3b1fff02016-05-10 18:08:32 -03002347 while (entry->nr < entry->max_stack) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02002348 unsigned long bytes;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002349
Ingo Molnar038e8362009-06-15 09:57:59 +02002350 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002351 frame.return_address = 0;
2352
Josh Poimboeuffc188222016-07-01 23:02:05 -05002353 if (!access_ok(VERIFY_READ, fp, sizeof(*fp) * 2))
Andi Kleen75925e12015-10-22 15:07:21 -07002354 break;
2355
Josh Poimboeuffc188222016-07-01 23:02:05 -05002356 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
Andi Kleen75925e12015-10-22 15:07:21 -07002357 if (bytes != 0)
2358 break;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002359 bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
Peter Zijlstra0a196842013-10-30 21:16:22 +01002360 if (bytes != 0)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002361 break;
2362
Arun Sharmabc6ca7b2012-04-20 15:41:35 -07002363 if (!valid_user_frame(fp, sizeof(frame)))
2364 break;
2365
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02002366 perf_callchain_store(entry, frame.return_address);
Andi Kleen75925e12015-10-22 15:07:21 -07002367 fp = (void __user *)frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002368 }
Andi Kleen75925e12015-10-22 15:07:21 -07002369 pagefault_enable();
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002370}
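/*
 * The walk above assumes frame-pointer based user stacks, i.e. every frame
 * starts with (sketch, see struct stack_frame in asm/stacktrace.h):
 *
 *	struct stack_frame {
 *		struct stack_frame __user	*next_frame;	// saved frame pointer
 *		unsigned long			return_address;	// saved return address
 *	};
 *
 * Binaries built with -fomit-frame-pointer therefore tend to produce short
 * or bogus user callchains here.
 */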
2371
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002372/*
2373 * Deal with code segment offsets for the various execution modes:
2374 *
2375 * VM86 - the good olde 16 bit days, where the linear address is
2376 * 20 bits and we use regs->ip + 0x10 * regs->cs.
2377 *
2378 * IA32 - Where we need to look at GDT/LDT segment descriptor tables
2379 * to figure out what the 32bit base address is.
2380 *
2381 * X32 - has TIF_X32 set, but is running in x86_64
2382 *
2383 * X86_64 - CS,DS,SS,ES are all zero based.
2384 */
2385static unsigned long code_segment_base(struct pt_regs *regs)
2386{
2387 /*
Andy Lutomirski383f3af2015-03-18 18:33:30 -07002388 * For IA32 we look at the GDT/LDT segment base to convert the
2389 * effective IP to a linear address.
2390 */
2391
2392#ifdef CONFIG_X86_32
2393 /*
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002394 * If we are in VM86 mode, add the segment offset to convert to a
2395 * linear address.
2396 */
2397 if (regs->flags & X86_VM_MASK)
2398 return 0x10 * regs->cs;
2399
Ingo Molnar55474c42015-03-29 11:02:34 +02002400 if (user_mode(regs) && regs->cs != __USER_CS)
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002401 return get_segment_base(regs->cs);
2402#else
Andy Lutomirskic56716a2015-03-18 18:33:28 -07002403 if (user_mode(regs) && !user_64bit_mode(regs) &&
2404 regs->cs != __USER32_CS)
2405 return get_segment_base(regs->cs);
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002406#endif
2407 return 0;
2408}
2409
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002410unsigned long perf_instruction_pointer(struct pt_regs *regs)
2411{
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002412 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002413 return perf_guest_cbs->get_guest_ip();
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002414
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002415 return regs->ip + code_segment_base(regs);
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002416}
2417
2418unsigned long perf_misc_flags(struct pt_regs *regs)
2419{
2420 int misc = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002421
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002422 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002423 if (perf_guest_cbs->is_user_mode())
2424 misc |= PERF_RECORD_MISC_GUEST_USER;
2425 else
2426 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
2427 } else {
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002428 if (user_mode(regs))
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002429 misc |= PERF_RECORD_MISC_USER;
2430 else
2431 misc |= PERF_RECORD_MISC_KERNEL;
2432 }
2433
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002434 if (regs->flags & PERF_EFLAGS_EXACT)
Peter Zijlstraab608342010-04-08 23:03:20 +02002435 misc |= PERF_RECORD_MISC_EXACT_IP;
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002436
2437 return misc;
2438}
Gleb Natapovb3d94682011-11-10 14:57:27 +02002439
2440void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
2441{
2442 cap->version = x86_pmu.version;
2443 cap->num_counters_gp = x86_pmu.num_counters;
2444 cap->num_counters_fixed = x86_pmu.num_counters_fixed;
2445 cap->bit_width_gp = x86_pmu.cntval_bits;
2446 cap->bit_width_fixed = x86_pmu.cntval_bits;
2447 cap->events_mask = (unsigned int)x86_pmu.events_maskl;
2448 cap->events_mask_len = x86_pmu.events_mask_len;
2449}
2450EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);