/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/nospec.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/unwind.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

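/* set when the sysfs rdpmc attribute is 2: userspace RDPMC stays allowed even without an mmap'ed perf event */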
struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;

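/*
 * Generic cache event tables, indexed by [cache][op][result]; the
 * vendor-specific init code fills them in.
 */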
u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	u64 delta;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
			    new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!x86_pmu.extra_regs)
		return 0;

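	/*
	 * Match the event against the extra-reg table (e.g. offcore response
	 * MSRs); attr->config1 carries the value written to the extra MSR.
	 */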
	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;
		/* Check if the extra msrs can be safely accessed */
		if (!er->extra_msr_access)
			return -ENXIO;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu_config_addr(i));

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

static bool check_hw_exists(void)
{
	u64 val, val_fail = -1, val_new = ~0;
	int i, reg, reg_fail = -1, ret = 0;
	int bios_fail = 0;
	int reg_safe = -1;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
			bios_fail = 1;
			val_fail = val;
			reg_fail = reg;
		} else {
			reg_safe = i;
		}
	}

	if (x86_pmu.num_counters_fixed) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
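			/* each fixed counter has a 4-bit control field; the low bits enable counting */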
			if (val & (0x03 << i*4)) {
				bios_fail = 1;
				val_fail = val;
				reg_fail = reg;
			}
		}
	}

	/*
	 * If all the counters are enabled, the below test will always
	 * fail. The tools will also become useless in this scenario.
	 * Just fail and disable the hardware counters.
	 */

	if (reg_safe == -1) {
		reg = reg_safe;
		goto msr_fail;
	}

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	reg = x86_pmu_event_addr(reg_safe);
	if (rdmsrl_safe(reg, &val))
		goto msr_fail;
	val ^= 0xffffUL;
	ret = wrmsrl_safe(reg, val);
	ret |= rdmsrl_safe(reg, &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	/*
	 * We still allow the PMU driver to operate:
	 */
	if (bios_fail) {
		pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
		pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
		       reg_fail, val_fail);
	}

	return true;

msr_fail:
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		pr_cont("PMU not available due to virtualization, using software events only.\n");
	} else {
		pr_cont("Broken PMU hardware detected, using software events only.\n");
		pr_err("Failed to access perfctr msr (MSR %x is %Lx)\n",
		       reg, val_new);
	}

	return false;
}

static void hw_perf_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	atomic_dec(&active_events);
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
	hw_perf_event_destroy(event);

	/* undo the lbr/bts event accounting */
	x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

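	/* clamp each decoded index to defeat speculative out-of-bounds use */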
	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;
	cache_type = array_index_nospec(cache_type, PERF_COUNT_HW_CACHE_MAX);

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;
	cache_op = array_index_nospec(cache_op, PERF_COUNT_HW_CACHE_OP_MAX);

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;
	cache_result = array_index_nospec(cache_result, PERF_COUNT_HW_CACHE_RESULT_MAX);

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}

int x86_reserve_hardware(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&pmc_refcount)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&pmc_refcount) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&pmc_refcount);
		mutex_unlock(&pmc_reserve_mutex);
	}

	return err;
}

void x86_release_hardware(void)
{
	if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Check if we can create event of a certain type (that no conflicting events
 * are present).
 */
int x86_add_exclusive(unsigned int what)
{
	int i;

	/*
	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
	 * LBR and BTS are still mutually exclusive.
	 */
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		return 0;

	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
		mutex_lock(&pmc_reserve_mutex);
		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
				goto fail_unlock;
		}
		atomic_inc(&x86_pmu.lbr_exclusive[what]);
		mutex_unlock(&pmc_reserve_mutex);
	}

	atomic_inc(&active_events);
	return 0;

fail_unlock:
	mutex_unlock(&pmc_reserve_mutex);
	return -EBUSY;
}

void x86_del_exclusive(unsigned int what)
{
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		return;

	atomic_dec(&x86_pmu.lbr_exclusive[what]);
	atomic_dec(&active_events);
}

int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (attr->type == PERF_TYPE_RAW)
		return x86_pmu_extra_regs(event->attr.config, event);

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	attr->config = array_index_nospec((unsigned long)attr->config, x86_pmu.max_events);

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !attr->freq && hwc->sample_period == 1) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;

		/* disallow bts if conflicting events are present */
		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
			return -EBUSY;

		event->destroy = hw_perf_lbr_event_destroy;
	}

	hwc->config |= config;

	return 0;
}

/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1 which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
static inline int precise_br_compat(struct perf_event *event)
{
	u64 m = event->attr.branch_sample_type;
	u64 b = 0;

	/* must capture all branches */
	if (!(m & PERF_SAMPLE_BRANCH_ANY))
		return 0;

	m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_user)
		b |= PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_kernel)
		b |= PERF_SAMPLE_BRANCH_KERNEL;

	/*
	 * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
	 */

	return m == b;
}

int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

		/* Support for constant skid */
		if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
			precise++;

			/* Support for IP fixup */
			if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
				precise++;

			if (x86_pmu.pebs_prec_dist)
				precise++;
		}

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;

		/* There's no sense in having PEBS for non sampling events: */
		if (!is_sampling_event(event))
			return -EINVAL;
	}
	/*
	 * check that PEBS LBR correction does not conflict with
	 * whatever the user is asking with attr->branch_sample_type
	 */
	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
		u64 *br_type = &event->attr.branch_sample_type;

		if (has_branch_stack(event)) {
			if (!precise_br_compat(event))
				return -EOPNOTSUPP;

			/* branch_sample_type is compatible */

		} else {
			/*
			 * user did not specify branch_sample_type
			 *
			 * For PEBS fixups, we capture all
			 * the branches at the priv level of the
			 * event.
			 */
			*br_type = PERF_SAMPLE_BRANCH_ANY;

			if (!event->attr.exclude_user)
				*br_type |= PERF_SAMPLE_BRANCH_USER;

			if (!event->attr.exclude_kernel)
				*br_type |= PERF_SAMPLE_BRANCH_KERNEL;
		}
	}

	if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
		event->attach_state |= PERF_ATTACH_TASK_DATA;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

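	/* reject sample periods that the PMU would have to raise (e.g. below a hardware minimum) */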
	if (event->attr.sample_period && x86_pmu.limit_period) {
		if (x86_pmu.limit_period(event, event->attr.sample_period) >
				event->attr.sample_period)
			return -EINVAL;
	}

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = x86_reserve_hardware();
	if (err)
		return err;

	atomic_inc(&active_events);
	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
}

void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu_config_addr(idx), val);
	}
}

/*
 * A PMI may land after enabled has been set to 0; it can hit either before or
 * after disable_all.
 *
 * If the PMI hits before disable_all, the PMU will be disabled in the NMI
 * handler. It will not be re-enabled there, because enabled=0. After handling
 * the NMI, disable_all will be called, which does not change the state either.
 * If the PMI hits after disable_all, the PMU is already disabled before
 * entering the NMI handler, and the handler will not change the state.
 *
 * So either situation is harmless.
 */
static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
	int	weight;
	int	event;		/* event index */
	int	counter;	/* counter index */
	int	unassigned;	/* number of events to be assigned left */
	int	nr_gp;		/* number of GP counters used */
	unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define SCHED_STATES_MAX	2

struct perf_sched {
	int			max_weight;
	int			max_events;
	int			max_gp;
	int			saved_states;
	struct event_constraint	**constraints;
	struct sched_state	state;
	struct sched_state	saved[SCHED_STATES_MAX];
};

/*
 * Initialize the iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{
	int idx;

	memset(sched, 0, sizeof(*sched));
	sched->max_events = num;
	sched->max_weight = wmax;
	sched->max_gp = gpmax;
	sched->constraints = constraints;

	for (idx = 0; idx < num; idx++) {
		if (constraints[idx]->weight == wmin)
			break;
	}

	sched->state.event = idx;	/* start with min weight */
	sched->state.weight = wmin;
	sched->state.unassigned = num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
		return;

	sched->saved[sched->saved_states] = sched->state;
	sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
	if (!sched->saved_states)
		return false;

	sched->saved_states--;
	sched->state = sched->saved[sched->saved_states];

	/* continue with next counter: */
	clear_bit(sched->state.counter++, sched->state.used);

	return true;
}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
	struct event_constraint *c;
	int idx;

	if (!sched->state.unassigned)
		return false;

	if (sched->state.event >= sched->max_events)
		return false;

	c = sched->constraints[sched->state.event];
	/* Prefer fixed purpose counters */
	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
		idx = INTEL_PMC_IDX_FIXED;
		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
			if (!__test_and_set_bit(idx, sched->state.used))
				goto done;
		}
	}

	/* Grab the first unused counter starting with idx */
	idx = sched->state.counter;
	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
		if (!__test_and_set_bit(idx, sched->state.used)) {
			if (sched->state.nr_gp++ >= sched->max_gp)
				return false;

			goto done;
		}
	}

	return false;

done:
	sched->state.counter = idx;

	if (c->overlap)
		perf_sched_save_state(sched);

	return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
	while (!__perf_sched_find_counter(sched)) {
		if (!perf_sched_restore_state(sched))
			return false;
	}

	return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
	struct event_constraint *c;

	if (!sched->state.unassigned || !--sched->state.unassigned)
		return false;

	do {
		/* next event */
		sched->state.event++;
		if (sched->state.event >= sched->max_events) {
			/* next weight */
			sched->state.event = 0;
			sched->state.weight++;
			if (sched->state.weight > sched->max_weight)
				return false;
		}
		c = sched->constraints[sched->state.event];
	} while (c->weight != sched->state.weight);

	sched->state.counter = 0;	/* start with first counter */

	return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign)
{
	struct perf_sched sched;

	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

	do {
		if (!perf_sched_find_counter(&sched))
			break;	/* failed */
		if (assign)
			assign[sched.state.event] = sched.state.counter;
	} while (perf_sched_next_event(&sched));

	return sched.state.unassigned;
}
EXPORT_SYMBOL_GPL(perf_assign_events);

int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c;
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	struct perf_event *e;
	int i, wmin, wmax, unsched = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	if (x86_pmu.start_scheduling)
		x86_pmu.start_scheduling(cpuc);

	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		cpuc->event_constraint[i] = NULL;
		c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
		cpuc->event_constraint[i] = c;

		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = cpuc->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n) {
		int gpmax = x86_pmu.num_counters;

		/*
		 * Do not allow scheduling of more than half the available
		 * generic counters.
		 *
		 * This helps avoid counter starvation of the sibling thread by
		 * ensuring that at most half the counters can be in exclusive
		 * mode. There are no designated counters for this limit; any
		 * N/2 counters can be used. This helps with events that have
		 * specific counter constraints.
		 */
		if (is_ht_workaround_enabled() && !cpuc->is_fake &&
		    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
			gpmax /= 2;

		unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
					     wmax, gpmax, assign);
	}

	/*
	 * In case of success (unsched = 0), mark events as committed,
	 * so we do not put_constraint() in case new events are added
	 * and fail to be scheduled
	 *
	 * We invoke the lower level commit callback to lock the resource
	 *
	 * We do not need to do all of this in case we are called to
	 * validate an event group (assign == NULL)
	 */
	if (!unsched && assign) {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			e->hw.flags |= PERF_X86_EVENT_COMMITTED;
			if (x86_pmu.commit_scheduling)
				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
		}
	} else {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			/*
			 * do not put_constraint() on committed events,
			 * because they are good to go
			 */
			if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
				continue;

			/*
			 * release events that failed scheduling
			 */
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, e);
		}
	}

	if (x86_pmu.stop_scheduling)
		x86_pmu.stop_scheduling(cpuc);

	return unsched ? -EINVAL : 0;
}

/*
 * dogrp: true if we must also collect the sibling events of a group
 * returns the total number of events, or an error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -EINVAL;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base = 0;
	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
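		/* RDPMC selects the fixed-counter range via bit 30 of its index */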
		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
	} else {
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base = x86_pmu_event_addr(hwc->idx);
		hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
	}
}

static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		/*
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
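		/* (re)program the local APIC so counter overflows are delivered as NMIs */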
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	if (x86_pmu.limit_period)
		left = x86_pmu.limit_period(event, left);

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	/*
	 * The hw event starts counting from this event offset,
	 * mark it to be able to extract future deltas:
	 */
	local64_set(&hwc->prev_count, (u64)-left);

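	/* program the counter to -left so it overflows after 'left' more events */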
	wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);

	/*
	 * Due to an erratum on certain CPUs we need a second write to be
	 * sure the register is updated properly.
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If a group event scheduling transaction was started,
	 * skip the schedulability test here; it will be performed
	 * at commit time (->commit_txn) as a whole.
	 *
	 * If commit fails, we'll call ->del() on all events
	 * for which ->add() was called.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001226 /*
 1227	 * Copy the new assignment; now we know it is possible, and
 1228	 * it will be used by hw_perf_enable().
1229 */
1230 memcpy(cpuc->assign, assign, n*sizeof(int));
Ingo Molnar241771e2008-12-03 10:39:53 +01001231
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001232done_collect:
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001233 /*
1234 * Commit the collect_events() state. See x86_pmu_del() and
1235 * x86_pmu_*_txn().
1236 */
Stephane Eranian1da53e02010-01-18 10:58:01 +02001237 cpuc->n_events = n;
Peter Zijlstra356e1f22010-03-06 13:49:56 +01001238 cpuc->n_added += n - n0;
Stephane Eranian90151c352010-05-25 16:23:10 +02001239 cpuc->n_txn += n - n0;
Ingo Molnar7e2ae342008-12-09 11:40:46 +01001240
Peter Zijlstra68f70822016-07-06 18:02:43 +02001241 if (x86_pmu.add) {
1242 /*
1243 * This is before x86_pmu_enable() will call x86_pmu_start(),
1244 * so we enable LBRs before an event needs them etc..
1245 */
1246 x86_pmu.add(event);
1247 }
1248
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001249 ret = 0;
1250out:
Peter Zijlstra24cd7f52010-06-11 17:32:03 +02001251 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001252}
1253
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001254static void x86_pmu_start(struct perf_event *event, int flags)
Stephane Eraniand76a0812010-02-08 17:06:01 +02001255{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001256 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001257 int idx = event->hw.idx;
1258
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001259 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1260 return;
Stephane Eraniand76a0812010-02-08 17:06:01 +02001261
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001262 if (WARN_ON_ONCE(idx == -1))
1263 return;
1264
1265 if (flags & PERF_EF_RELOAD) {
1266 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1267 x86_perf_event_set_period(event);
1268 }
1269
1270 event->hw.state = 0;
1271
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001272 cpuc->events[idx] = event;
1273 __set_bit(idx, cpuc->active_mask);
Robert Richter63e6be62010-09-15 18:20:34 +02001274 __set_bit(idx, cpuc->running);
Peter Zijlstraaff3d912010-03-02 20:32:08 +01001275 x86_pmu.enable(event);
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001276 perf_event_update_userpage(event);
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001277}
1278
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001279void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001280{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001281 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Andi Kleenda3e6062015-02-27 09:48:31 -08001282 u64 pebs, debugctl;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001283 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001284 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001285 int cpu, idx;
1286
Robert Richter948b1bb2010-03-29 18:36:50 +02001287 if (!x86_pmu.num_counters)
Ingo Molnar1e125672008-12-09 12:18:18 +01001288 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001289
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001290 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001291
1292 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001293 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001294
Robert Richterfaa28ae2009-04-29 12:47:13 +02001295 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301296 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1297 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1298 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1299 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001300
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301301 pr_info("\n");
1302 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1303 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1304 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1305 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Andi Kleen15fde112015-02-27 09:48:32 -08001306 if (x86_pmu.pebs_constraints) {
1307 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1308 pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
1309 }
Andi Kleenda3e6062015-02-27 09:48:31 -08001310 if (x86_pmu.lbr_nr) {
1311 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1312 pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl);
1313 }
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301314 }
Peter Zijlstra7645a242010-03-08 13:51:31 +01001315 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001316
Robert Richter948b1bb2010-03-29 18:36:50 +02001317 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Robert Richter41bf4982011-02-02 17:40:57 +01001318 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1319 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001320
Tejun Heo245b2e72009-06-24 15:13:48 +09001321 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001322
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301323 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001324 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301325 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001326 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301327 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001328 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001329 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001330 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001331 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1332
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301333 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001334 cpu, idx, pmc_count);
1335 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001336 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001337}
1338
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001339void x86_pmu_stop(struct perf_event *event, int flags)
Ingo Molnar241771e2008-12-03 10:39:53 +01001340{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001341 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001342 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001343
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001344 if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1345 x86_pmu.disable(event);
1346 cpuc->events[hwc->idx] = NULL;
1347 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1348 hwc->state |= PERF_HES_STOPPED;
1349 }
Peter Zijlstra71e2d282010-03-08 17:51:33 +01001350
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001351 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1352 /*
 1353	 * Drain the remaining delta count out of an event
1354 * that we are disabling:
1355 */
1356 x86_perf_event_update(event);
1357 hwc->state |= PERF_HES_UPTODATE;
1358 }
Peter Zijlstra2e841872010-01-25 15:58:43 +01001359}
1360
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001361static void x86_pmu_del(struct perf_event *event, int flags)
Peter Zijlstra2e841872010-01-25 15:58:43 +01001362{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001363 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstra2e841872010-01-25 15:58:43 +01001364 int i;
1365
Stephane Eranian90151c352010-05-25 16:23:10 +02001366 /*
Stephane Eranian2f7f73a2013-06-20 18:42:54 +02001367 * event is descheduled
1368 */
1369 event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
1370
1371 /*
Peter Zijlstra68f70822016-07-06 18:02:43 +02001372 * If we're called during a txn, we only need to undo x86_pmu.add.
Stephane Eranian90151c352010-05-25 16:23:10 +02001373 * The events never got scheduled and ->cancel_txn will truncate
1374 * the event_list.
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001375 *
1376 * XXX assumes any ->del() called during a TXN will only be on
1377 * an event added during that same TXN.
Stephane Eranian90151c352010-05-25 16:23:10 +02001378 */
Sukadev Bhattiprolu8f3e5682015-09-03 20:07:53 -07001379 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
Peter Zijlstra68f70822016-07-06 18:02:43 +02001380 goto do_del;
Stephane Eranian90151c352010-05-25 16:23:10 +02001381
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001382 /*
1383 * Not a TXN, therefore cleanup properly.
1384 */
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001385 x86_pmu_stop(event, PERF_EF_UPDATE);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001386
Stephane Eranian1da53e02010-01-18 10:58:01 +02001387 for (i = 0; i < cpuc->n_events; i++) {
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001388 if (event == cpuc->event_list[i])
Peter Zijlstra6c9687a2010-01-25 11:57:25 +01001389 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001390 }
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001391
1392 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
1393 return;
1394
 1395	/* If we have a newly added event, make sure to decrease n_added. */
1396 if (i >= cpuc->n_events - cpuc->n_added)
1397 --cpuc->n_added;
1398
1399 if (x86_pmu.put_event_constraints)
1400 x86_pmu.put_event_constraints(cpuc, event);
1401
1402 /* Delete the array entry. */
Peter Zijlstrab371b592015-05-21 10:57:13 +02001403 while (++i < cpuc->n_events) {
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001404 cpuc->event_list[i-1] = cpuc->event_list[i];
Peter Zijlstrab371b592015-05-21 10:57:13 +02001405 cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
1406 }
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001407 --cpuc->n_events;
1408
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001409 perf_event_update_userpage(event);
Peter Zijlstra68f70822016-07-06 18:02:43 +02001410
1411do_del:
1412 if (x86_pmu.del) {
1413 /*
1414 * This is after x86_pmu_stop(); so we disable LBRs after any
1415 * event can need them etc..
1416 */
1417 x86_pmu.del(event);
1418 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001419}
1420
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001421int x86_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02001422{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001423 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001424 struct cpu_hw_events *cpuc;
1425 struct perf_event *event;
Vince Weaver11d15782009-07-08 17:46:14 -04001426 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001427 u64 val;
1428
Christoph Lameter89cbc762014-08-17 12:30:40 -05001429 cpuc = this_cpu_ptr(&cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001430
Don Zickus2bce5da2011-04-27 06:32:33 -04001431 /*
1432 * Some chipsets need to unmask the LVTPC in a particular spot
1433 * inside the nmi handler. As a result, the unmasking was pushed
1434 * into all the nmi handlers.
1435 *
1436 * This generic handler doesn't seem to have any issues where the
1437 * unmasking occurs so it was left at the top.
1438 */
1439 apic_write(APIC_LVTPC, APIC_DM_NMI);
1440
Robert Richter948b1bb2010-03-29 18:36:50 +02001441 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Robert Richter63e6be62010-09-15 18:20:34 +02001442 if (!test_bit(idx, cpuc->active_mask)) {
1443 /*
 1444	 * Though we deactivated the counter, some CPUs
 1445	 * might still deliver spurious interrupts that are
 1446	 * already in flight. Catch them:
1447 */
1448 if (__test_and_clear_bit(idx, cpuc->running))
1449 handled++;
Robert Richtera29aa8a2009-04-29 12:47:21 +02001450 continue;
Robert Richter63e6be62010-09-15 18:20:34 +02001451 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001452
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001453 event = cpuc->events[idx];
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001454
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001455 val = x86_perf_event_update(event);
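		/*
		 * The counter was programmed with (-left); as long as its most
		 * significant bit is still set it has not yet wrapped through
		 * zero, so this counter did not raise the PMI.
		 */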
Robert Richter948b1bb2010-03-29 18:36:50 +02001456 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001457 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001458
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001459 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001460 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001461 */
Robert Richter4177c422010-09-02 15:07:48 -04001462 handled++;
Robert Richterfd0d0002012-04-02 20:19:08 +02001463 perf_sample_data_init(&data, 0, event->hw.last_period);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001464
Peter Zijlstra07088ed2010-03-02 20:16:01 +01001465 if (!x86_perf_event_set_period(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001466 continue;
1467
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001468 if (perf_event_overflow(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001469 x86_pmu_stop(event, 0);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001470 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001471
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001472 if (handled)
1473 inc_irq_stat(apic_perf_irqs);
1474
Robert Richtera29aa8a2009-04-29 12:47:21 +02001475 return handled;
1476}
Robert Richter39d81ea2009-04-29 12:47:05 +02001477
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001478void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001479{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001480 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001481 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001482
Ingo Molnar241771e2008-12-03 10:39:53 +01001483 /*
Yong Wangc323d952009-05-29 13:28:35 +08001484 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001485 */
Yong Wangc323d952009-05-29 13:28:35 +08001486 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar241771e2008-12-03 10:39:53 +01001487}
1488
Masami Hiramatsu93266382014-04-17 17:18:14 +09001489static int
Don Zickus9c48f1c2011-09-30 15:06:21 -04001490perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001491{
Dave Hansen14c63f12013-06-21 08:51:36 -07001492 u64 start_clock;
1493 u64 finish_clock;
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001494 int ret;
Dave Hansen14c63f12013-06-21 08:51:36 -07001495
Alexander Shishkin1b7b9382015-06-09 13:03:26 +03001496 /*
1497 * All PMUs/events that share this PMI handler should make sure to
1498 * increment active_events for their events.
1499 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001500 if (!atomic_read(&active_events))
Don Zickus9c48f1c2011-09-30 15:06:21 -04001501 return NMI_DONE;
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001502
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001503 start_clock = sched_clock();
Dave Hansen14c63f12013-06-21 08:51:36 -07001504 ret = x86_pmu.handle_irq(regs);
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001505 finish_clock = sched_clock();
Dave Hansen14c63f12013-06-21 08:51:36 -07001506
1507 perf_sample_event_took(finish_clock - start_clock);
1508
1509 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001510}
Masami Hiramatsu93266382014-04-17 17:18:14 +09001511NOKPROBE_SYMBOL(perf_event_nmi_handler);
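/*
 * The measured handler time feeds perf_sample_event_took(), which the core
 * can use to lower the maximum sampling rate (cf. the
 * kernel.perf_cpu_time_max_percent sysctl) when NMI handling gets too
 * expensive.
 */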
Ingo Molnar241771e2008-12-03 10:39:53 +01001512
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001513struct event_constraint emptyconstraint;
1514struct event_constraint unconstrained;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301515
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001516static int x86_pmu_prepare_cpu(unsigned int cpu)
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001517{
Peter Zijlstra7fdba1c2011-07-22 13:41:54 +02001518 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001519 int i;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001520
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001521 for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
1522 cpuc->kfree_on_online[i] = NULL;
1523 if (x86_pmu.cpu_prepare)
1524 return x86_pmu.cpu_prepare(cpu);
1525 return 0;
1526}
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001527
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001528static int x86_pmu_dead_cpu(unsigned int cpu)
1529{
1530 if (x86_pmu.cpu_dead)
1531 x86_pmu.cpu_dead(cpu);
1532 return 0;
1533}
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001534
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001535static int x86_pmu_online_cpu(unsigned int cpu)
1536{
1537 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1538 int i;
Peter Zijlstra7fdba1c2011-07-22 13:41:54 +02001539
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001540 for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
1541 kfree(cpuc->kfree_on_online[i]);
1542 cpuc->kfree_on_online[i] = NULL;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001543 }
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001544 return 0;
1545}
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001546
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001547static int x86_pmu_starting_cpu(unsigned int cpu)
1548{
1549 if (x86_pmu.cpu_starting)
1550 x86_pmu.cpu_starting(cpu);
1551 return 0;
1552}
1553
1554static int x86_pmu_dying_cpu(unsigned int cpu)
1555{
1556 if (x86_pmu.cpu_dying)
1557 x86_pmu.cpu_dying(cpu);
1558 return 0;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001559}
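/*
 * These callbacks are wired into the CPU hotplug state machine by
 * init_hw_perf_events() below; roughly, prepare runs before a CPU is brought
 * up, starting/dying run on the CPU itself while it comes up or goes down,
 * online runs once the CPU is fully up, and dead runs after it is gone.
 */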
1560
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001561static void __init pmu_check_apic(void)
1562{
Borislav Petkov93984fb2016-04-04 22:25:00 +02001563 if (boot_cpu_has(X86_FEATURE_APIC))
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001564 return;
1565
1566 x86_pmu.apic = 0;
1567 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1568 pr_info("no hardware sampling interrupt available.\n");
Vince Weaverc184c982014-05-16 17:18:07 -04001569
1570 /*
1571 * If we have a PMU initialized but no APIC
1572 * interrupts, we cannot sample hardware
1573 * events (user-space has to fall back and
1574 * sample via a hrtimer based software event):
1575 */
1576 pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1577
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001578}
1579
Jiri Olsa641cc932012-03-15 20:09:14 +01001580static struct attribute_group x86_pmu_format_group = {
1581 .name = "format",
1582 .attrs = NULL,
1583};
1584
Jiri Olsa8300daa2012-10-10 14:53:12 +02001585/*
1586 * Remove all undefined events (x86_pmu.event_map(id) == 0)
 1587 * from the events_attr attributes.
1588 */
1589static void __init filter_events(struct attribute **attrs)
1590{
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001591 struct device_attribute *d;
1592 struct perf_pmu_events_attr *pmu_attr;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001593 int offset = 0;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001594 int i, j;
1595
1596 for (i = 0; attrs[i]; i++) {
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001597 d = (struct device_attribute *)attrs[i];
1598 pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
1599 /* str trumps id */
1600 if (pmu_attr->event_str)
1601 continue;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001602 if (x86_pmu.event_map(i + offset))
Jiri Olsa8300daa2012-10-10 14:53:12 +02001603 continue;
1604
1605 for (j = i; attrs[j]; j++)
1606 attrs[j] = attrs[j + 1];
1607
1608 /* Check the shifted attr. */
1609 i--;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001610
1611 /*
 1612		 * event_map() is index based, while the attrs array is organized
 1613		 * by increasing event index. If we shift the events, then
 1614		 * we need to compensate for the event_map(); otherwise
 1615		 * we are looking up the wrong event in the map.
1616 */
1617 offset++;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001618 }
1619}
1620
Andi Kleen1a6461b2013-01-24 16:10:25 +01001621/* Merge two pointer arrays */
Andi Kleen47732d82015-06-29 14:22:13 -07001622__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
Andi Kleen1a6461b2013-01-24 16:10:25 +01001623{
1624 struct attribute **new;
1625 int j, i;
1626
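	/* Count the entries of 'a' and 'b', plus one slot for the NULL terminator. */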
1627 for (j = 0; a[j]; j++)
1628 ;
1629 for (i = 0; b[i]; i++)
1630 j++;
1631 j++;
1632
1633 new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
1634 if (!new)
1635 return NULL;
1636
1637 j = 0;
1638 for (i = 0; a[i]; i++)
1639 new[j++] = a[i];
1640 for (i = 0; b[i]; i++)
1641 new[j++] = b[i];
1642 new[j] = NULL;
1643
1644 return new;
1645}
1646
Huang Ruic7ab62b2016-03-09 13:45:06 +08001647ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
Jiri Olsaa4747392012-10-10 14:53:11 +02001648{
1649 struct perf_pmu_events_attr *pmu_attr = \
1650 container_of(attr, struct perf_pmu_events_attr, attr);
Jiri Olsaa4747392012-10-10 14:53:11 +02001651 u64 config = x86_pmu.event_map(pmu_attr->id);
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001652
1653 /* string trumps id */
1654 if (pmu_attr->event_str)
1655 return sprintf(page, "%s", pmu_attr->event_str);
1656
Jiri Olsaa4747392012-10-10 14:53:11 +02001657 return x86_pmu.events_sysfs_show(page, config);
1658}
Huang Ruic7ab62b2016-03-09 13:45:06 +08001659EXPORT_SYMBOL_GPL(events_sysfs_show);
Jiri Olsaa4747392012-10-10 14:53:11 +02001660
Andi Kleenfc07e9f2016-05-19 17:09:56 -07001661ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
1662 char *page)
1663{
1664 struct perf_pmu_events_ht_attr *pmu_attr =
1665 container_of(attr, struct perf_pmu_events_ht_attr, attr);
1666
1667 /*
1668 * Report conditional events depending on Hyper-Threading.
1669 *
1670 * This is overly conservative as usually the HT special
1671 * handling is not needed if the other CPU thread is idle.
1672 *
1673 * Note this does not (and cannot) handle the case when thread
1674 * siblings are invisible, for example with virtualization
1675 * if they are owned by some other guest. The user tool
1676 * has to re-read when a thread sibling gets onlined later.
1677 */
1678 return sprintf(page, "%s",
1679 topology_max_smt_threads() > 1 ?
1680 pmu_attr->event_str_ht :
1681 pmu_attr->event_str_noht);
1682}
1683
Jiri Olsaa4747392012-10-10 14:53:11 +02001684EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1685EVENT_ATTR(instructions, INSTRUCTIONS );
1686EVENT_ATTR(cache-references, CACHE_REFERENCES );
1687EVENT_ATTR(cache-misses, CACHE_MISSES );
1688EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
1689EVENT_ATTR(branch-misses, BRANCH_MISSES );
1690EVENT_ATTR(bus-cycles, BUS_CYCLES );
1691EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
1692EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
1693EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
1694
1695static struct attribute *empty_attrs;
1696
Peter Huewe95d18aa2012-10-29 21:48:17 +01001697static struct attribute *events_attr[] = {
Jiri Olsaa4747392012-10-10 14:53:11 +02001698 EVENT_PTR(CPU_CYCLES),
1699 EVENT_PTR(INSTRUCTIONS),
1700 EVENT_PTR(CACHE_REFERENCES),
1701 EVENT_PTR(CACHE_MISSES),
1702 EVENT_PTR(BRANCH_INSTRUCTIONS),
1703 EVENT_PTR(BRANCH_MISSES),
1704 EVENT_PTR(BUS_CYCLES),
1705 EVENT_PTR(STALLED_CYCLES_FRONTEND),
1706 EVENT_PTR(STALLED_CYCLES_BACKEND),
1707 EVENT_PTR(REF_CPU_CYCLES),
1708 NULL,
1709};
1710
1711static struct attribute_group x86_pmu_events_group = {
1712 .name = "events",
1713 .attrs = events_attr,
1714};
1715
Jiri Olsa0bf79d42012-10-10 14:53:14 +02001716ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
Jiri Olsa43c032f2012-10-10 14:53:13 +02001717{
Jiri Olsa43c032f2012-10-10 14:53:13 +02001718 u64 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
1719 u64 cmask = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
1720 bool edge = (config & ARCH_PERFMON_EVENTSEL_EDGE);
1721 bool pc = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
1722 bool any = (config & ARCH_PERFMON_EVENTSEL_ANY);
1723 bool inv = (config & ARCH_PERFMON_EVENTSEL_INV);
1724 ssize_t ret;
1725
1726 /*
 1727	 * We have a whole page to spend and just a little data
 1728	 * to write, so we can safely use sprintf.
1729 */
1730 ret = sprintf(page, "event=0x%02llx", event);
1731
1732 if (umask)
1733 ret += sprintf(page + ret, ",umask=0x%02llx", umask);
1734
1735 if (edge)
1736 ret += sprintf(page + ret, ",edge");
1737
1738 if (pc)
1739 ret += sprintf(page + ret, ",pc");
1740
1741 if (any)
1742 ret += sprintf(page + ret, ",any");
1743
1744 if (inv)
1745 ret += sprintf(page + ret, ",inv");
1746
1747 if (cmask)
1748 ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
1749
1750 ret += sprintf(page + ret, "\n");
1751
1752 return ret;
1753}
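/*
 * For example, an event select of 0xc0 with a umask of 0x01 in @config is
 * rendered by the code above as "event=0xc0,umask=0x01\n".
 */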
1754
Yinghai Ludda99112011-01-21 15:30:01 -08001755static int __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301756{
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001757 struct x86_pmu_quirk *quirk;
Robert Richter72eae042009-04-29 12:47:10 +02001758 int err;
1759
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001760 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001761
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301762 switch (boot_cpu_data.x86_vendor) {
1763 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001764 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301765 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301766 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001767 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301768 break;
Robert Richter41389602009-04-29 12:47:00 +02001769 default:
Ingo Molnar8a3da6c72013-09-28 15:48:48 +02001770 err = -ENOTSUPP;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301771 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001772 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001773 pr_cont("no PMU driver, software events only.\n");
Peter Zijlstra004417a2010-11-25 18:38:29 +01001774 return 0;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001775 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301776
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001777 pmu_check_apic();
1778
Don Zickus33c6d6a2010-11-22 16:55:23 -05001779 /* sanity check that the hardware exists or is emulated */
Peter Zijlstra44072042010-12-08 15:56:23 +01001780 if (!check_hw_exists())
Peter Zijlstra004417a2010-11-25 18:38:29 +01001781 return 0;
Don Zickus33c6d6a2010-11-22 16:55:23 -05001782
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001783 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001784
Peter Zijlstrae97df762014-02-05 20:48:51 +01001785 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1786
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001787 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1788 quirk->func();
Peter Zijlstra3c447802010-03-04 21:49:01 +01001789
Robert Richtera1eac7a2012-06-20 20:46:34 +02001790 if (!x86_pmu.intel_ctrl)
1791 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001792
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001793 perf_events_lapic_init();
Don Zickus9c48f1c2011-09-30 15:06:21 -04001794 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001795
Peter Zijlstra63b14642010-01-22 16:32:17 +01001796 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001797 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001798 0, x86_pmu.num_counters, 0, 0);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001799
Jiri Olsa641cc932012-03-15 20:09:14 +01001800 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01001801
Stephane Eranianf20093e2013-01-24 16:10:32 +01001802 if (x86_pmu.event_attrs)
1803 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1804
Jiri Olsaa4747392012-10-10 14:53:11 +02001805 if (!x86_pmu.events_sysfs_show)
1806 x86_pmu_events_group.attrs = &empty_attrs;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001807 else
1808 filter_events(x86_pmu_events_group.attrs);
Jiri Olsaa4747392012-10-10 14:53:11 +02001809
Andi Kleen1a6461b2013-01-24 16:10:25 +01001810 if (x86_pmu.cpu_events) {
1811 struct attribute **tmp;
1812
1813 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1814 if (!WARN_ON(!tmp))
1815 x86_pmu_events_group.attrs = tmp;
1816 }
1817
Ingo Molnar57c0c152009-09-21 12:20:38 +02001818 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001819 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1820 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1821 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001822 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001823 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001824 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001825
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001826 /*
1827 * Install callbacks. Core will call them for each online
1828 * cpu.
1829 */
1830 err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "PERF_X86_PREPARE",
1831 x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
1832 if (err)
1833 return err;
1834
1835 err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
1836 "AP_PERF_X86_STARTING", x86_pmu_starting_cpu,
1837 x86_pmu_dying_cpu);
1838 if (err)
1839 goto out;
1840
1841 err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "AP_PERF_X86_ONLINE",
1842 x86_pmu_online_cpu, NULL);
1843 if (err)
1844 goto out1;
1845
1846 err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1847 if (err)
1848 goto out2;
Peter Zijlstra004417a2010-11-25 18:38:29 +01001849
1850 return 0;
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001851
1852out2:
1853 cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
1854out1:
1855 cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
1856out:
1857 cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
1858 return err;
Ingo Molnar241771e2008-12-03 10:39:53 +01001859}
Peter Zijlstra004417a2010-11-25 18:38:29 +01001860early_initcall(init_hw_perf_events);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001861
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001862static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001863{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001864 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001865}
1866
Lin Ming4d1c52b2010-04-23 13:56:12 +08001867/*
1868 * Start group events scheduling transaction
1869 * Set the flag to make pmu::enable() not perform the
 1870 * schedulability test; it will be performed at commit time
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001871 *
1872 * We only support PERF_PMU_TXN_ADD transactions. Save the
1873 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
1874 * transactions.
Lin Ming4d1c52b2010-04-23 13:56:12 +08001875 */
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001876static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001877{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001878 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1879
1880 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */
1881
1882 cpuc->txn_flags = txn_flags;
1883 if (txn_flags & ~PERF_PMU_TXN_ADD)
1884 return;
1885
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001886 perf_pmu_disable(pmu);
Tejun Heo0a3aee02010-12-18 16:28:55 +01001887 __this_cpu_write(cpu_hw_events.n_txn, 0);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001888}
1889
1890/*
1891 * Stop group events scheduling transaction
1892 * Clear the flag and pmu::enable() will perform the
1893 * schedulability test.
1894 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001895static void x86_pmu_cancel_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001896{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001897 unsigned int txn_flags;
1898 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1899
1900 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1901
1902 txn_flags = cpuc->txn_flags;
1903 cpuc->txn_flags = 0;
1904 if (txn_flags & ~PERF_PMU_TXN_ADD)
1905 return;
1906
Stephane Eranian90151c352010-05-25 16:23:10 +02001907 /*
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001908 * Truncate collected array by the number of events added in this
1909 * transaction. See x86_pmu_add() and x86_pmu_*_txn().
Stephane Eranian90151c352010-05-25 16:23:10 +02001910 */
Tejun Heo0a3aee02010-12-18 16:28:55 +01001911 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1912 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001913 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001914}
1915
1916/*
1917 * Commit group events scheduling transaction
1918 * Perform the group schedulability test as a whole
1919 * Return 0 if success
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001920 *
1921 * Does not cancel the transaction on failure; expects the caller to do this.
Lin Ming4d1c52b2010-04-23 13:56:12 +08001922 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001923static int x86_pmu_commit_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001924{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001925 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001926 int assign[X86_PMC_IDX_MAX];
1927 int n, ret;
1928
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001929 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1930
1931 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
1932 cpuc->txn_flags = 0;
1933 return 0;
1934 }
1935
Lin Ming4d1c52b2010-04-23 13:56:12 +08001936 n = cpuc->n_events;
1937
1938 if (!x86_pmu_initialized())
1939 return -EAGAIN;
1940
1941 ret = x86_pmu.schedule_events(cpuc, n, assign);
1942 if (ret)
1943 return ret;
1944
1945 /*
 1946	 * Copy the new assignment; now we know it is possible, and
 1947	 * it will be used by hw_perf_enable().
1948 */
1949 memcpy(cpuc->assign, assign, n*sizeof(int));
1950
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001951 cpuc->txn_flags = 0;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001952 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001953 return 0;
1954}
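/*
 * Rough sketch of how the core is expected to drive this transaction API
 * when scheduling an event group (simplified, not the actual core code):
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	for each event in the group:
 *		pmu->add(event, flags);		// no per-event sched test
 *	if (pmu->commit_txn(pmu))		// group-wide sched test
 *		pmu->cancel_txn(pmu);		// plus ->del() of added events
 */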
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001955/*
1956 * a fake_cpuc is used to validate event groups. Due to
1957 * the extra reg logic, we need to also allocate a fake
1958 * per_core and per_cpu structure. Otherwise, group events
1959 * using extra reg may conflict without the kernel being
1960 * able to catch this when the last event gets added to
1961 * the group.
1962 */
1963static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1964{
1965 kfree(cpuc->shared_regs);
1966 kfree(cpuc);
1967}
1968
1969static struct cpu_hw_events *allocate_fake_cpuc(void)
1970{
1971 struct cpu_hw_events *cpuc;
1972 int cpu = raw_smp_processor_id();
1973
1974 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1975 if (!cpuc)
1976 return ERR_PTR(-ENOMEM);
1977
 1978	/* only needed if we have extra_regs */
1979 if (x86_pmu.extra_regs) {
1980 cpuc->shared_regs = allocate_shared_regs(cpu);
1981 if (!cpuc->shared_regs)
1982 goto error;
1983 }
Peter Zijlstrab430f7c2012-06-05 15:30:31 +02001984 cpuc->is_fake = 1;
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001985 return cpuc;
1986error:
1987 free_fake_cpuc(cpuc);
1988 return ERR_PTR(-ENOMEM);
1989}
Lin Ming4d1c52b2010-04-23 13:56:12 +08001990
Stephane Eranian1da53e02010-01-18 10:58:01 +02001991/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001992 * validate that we can schedule this event
1993 */
1994static int validate_event(struct perf_event *event)
1995{
1996 struct cpu_hw_events *fake_cpuc;
1997 struct event_constraint *c;
1998 int ret = 0;
1999
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002000 fake_cpuc = allocate_fake_cpuc();
2001 if (IS_ERR(fake_cpuc))
2002 return PTR_ERR(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01002003
Stephane Eranian79cba822014-11-17 20:06:56 +01002004 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
Peter Zijlstraca037702010-03-02 19:52:12 +01002005
2006 if (!c || !c->weight)
Peter Zijlstraaa2bc1a2011-11-09 17:56:37 +01002007 ret = -EINVAL;
Peter Zijlstraca037702010-03-02 19:52:12 +01002008
2009 if (x86_pmu.put_event_constraints)
2010 x86_pmu.put_event_constraints(fake_cpuc, event);
2011
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002012 free_fake_cpuc(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01002013
2014 return ret;
2015}
2016
2017/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02002018 * validate a single event group
2019 *
 2020 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01002021 * - check events are compatible with each other
2022 * - events do not compete for the same counter
2023 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02002024 *
2025 * validation ensures the group can be loaded onto the
2026 * PMU if it was the only group available.
2027 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002028static int validate_group(struct perf_event *event)
2029{
Stephane Eranian1da53e02010-01-18 10:58:01 +02002030 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01002031 struct cpu_hw_events *fake_cpuc;
Peter Zijlstraaa2bc1a2011-11-09 17:56:37 +01002032 int ret = -EINVAL, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002033
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002034 fake_cpuc = allocate_fake_cpuc();
2035 if (IS_ERR(fake_cpuc))
2036 return PTR_ERR(fake_cpuc);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002037 /*
2038 * the event is not yet connected with its
 2039	 * siblings; therefore we must first collect
2040 * existing siblings, then add the new event
2041 * before we can simulate the scheduling
2042 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01002043 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002044 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002045 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002046
Peter Zijlstra502568d2010-01-22 14:35:46 +01002047 fake_cpuc->n_events = n;
2048 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002049 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002050 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002051
Peter Zijlstra502568d2010-01-22 14:35:46 +01002052 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002053
Cyrill Gorcunova0727382010-03-11 19:54:39 +03002054 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01002055
Peter Zijlstra502568d2010-01-22 14:35:46 +01002056out:
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002057 free_fake_cpuc(fake_cpuc);
Peter Zijlstra502568d2010-01-22 14:35:46 +01002058 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002059}
2060
Yinghai Ludda99112011-01-21 15:30:01 -08002061static int x86_pmu_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01002062{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02002063 struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002064 int err;
2065
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002066 switch (event->attr.type) {
2067 case PERF_TYPE_RAW:
2068 case PERF_TYPE_HARDWARE:
2069 case PERF_TYPE_HW_CACHE:
2070 break;
2071
2072 default:
2073 return -ENOENT;
2074 }
2075
2076 err = __x86_pmu_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002077 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02002078 /*
2079 * we temporarily connect event to its pmu
2080 * such that validate_group() can classify
2081 * it as an x86 event using is_x86_event()
2082 */
2083 tmp = event->pmu;
2084 event->pmu = &pmu;
2085
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002086 if (event->group_leader != event)
2087 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01002088 else
2089 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02002090
2091 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002092 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002093 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002094 if (event->destroy)
2095 event->destroy(event);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002096 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01002097
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002098 if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
2099 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
2100
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002101 return err;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002102}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002103
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002104static void refresh_pce(void *ignored)
2105{
Andy Lutomirski9f9115b2017-03-16 12:59:39 -07002106 if (current->active_mm)
2107 load_mm_cr4(current->active_mm);
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002108}
2109
2110static void x86_pmu_event_mapped(struct perf_event *event)
2111{
2112 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2113 return;
2114
2115 if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
2116 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2117}
2118
2119static void x86_pmu_event_unmapped(struct perf_event *event)
2120{
2121 if (!current->mm)
2122 return;
2123
2124 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2125 return;
2126
2127 if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
2128 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2129}
2130
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002131static int x86_pmu_event_idx(struct perf_event *event)
2132{
2133 int idx = event->hw.idx;
2134
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002135 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
Peter Zijlstrac7206202012-03-22 17:26:36 +01002136 return 0;
2137
Robert Richter15c7ad52012-06-20 20:46:33 +02002138 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
2139 idx -= INTEL_PMC_IDX_FIXED;
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002140 idx |= 1 << 30;
2141 }
2142
2143 return idx + 1;
2144}
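/*
 * The value returned here ends up in the mmapped perf_event_mmap_page as
 * userpg->index: 0 means self-monitoring via RDPMC is not possible, any
 * other value is meant to be used by userspace as rdpmc(index - 1); bit 30
 * of that selector is how the hardware addresses the fixed-function
 * counters.
 */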
2145
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002146static ssize_t get_attr_rdpmc(struct device *cdev,
2147 struct device_attribute *attr,
2148 char *buf)
2149{
2150 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
2151}
2152
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002153static ssize_t set_attr_rdpmc(struct device *cdev,
2154 struct device_attribute *attr,
2155 const char *buf, size_t count)
2156{
Shuah Khane2b297f2012-06-10 21:13:41 -06002157 unsigned long val;
2158 ssize_t ret;
2159
2160 ret = kstrtoul(buf, 0, &val);
2161 if (ret)
2162 return ret;
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002163
Andy Lutomirskia6673422014-10-24 15:58:13 -07002164 if (val > 2)
2165 return -EINVAL;
2166
Peter Zijlstrae97df762014-02-05 20:48:51 +01002167 if (x86_pmu.attr_rdpmc_broken)
2168 return -ENOTSUPP;
2169
Andy Lutomirskia6673422014-10-24 15:58:13 -07002170 if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
2171 /*
2172 * Changing into or out of always available, aka
2173 * perf-event-bypassing mode. This path is extremely slow,
2174 * but only root can trigger it, so it's okay.
2175 */
2176 if (val == 2)
2177 static_key_slow_inc(&rdpmc_always_available);
2178 else
2179 static_key_slow_dec(&rdpmc_always_available);
2180 on_each_cpu(refresh_pce, NULL, 1);
2181 }
2182
2183 x86_pmu.attr_rdpmc = val;
2184
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002185 return count;
2186}
2187
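/*
 * Writable by root only, typically via /sys/bus/event_source/devices/cpu/rdpmc:
 * 0 disables user-space RDPMC, 1 (the default) allows it for tasks that have a
 * perf event mapped, and 2 makes the instruction available to everyone.
 */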
2188static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
2189
2190static struct attribute *x86_pmu_attrs[] = {
2191 &dev_attr_rdpmc.attr,
2192 NULL,
2193};
2194
2195static struct attribute_group x86_pmu_attr_group = {
2196 .attrs = x86_pmu_attrs,
2197};
2198
2199static const struct attribute_group *x86_pmu_attr_groups[] = {
2200 &x86_pmu_attr_group,
Jiri Olsa641cc932012-03-15 20:09:14 +01002201 &x86_pmu_format_group,
Jiri Olsaa4747392012-10-10 14:53:11 +02002202 &x86_pmu_events_group,
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002203 NULL,
2204};
2205
Yan, Zhengba532502014-11-04 21:55:58 -05002206static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
Stephane Eraniand010b332012-02-09 23:21:00 +01002207{
Yan, Zhengba532502014-11-04 21:55:58 -05002208 if (x86_pmu.sched_task)
2209 x86_pmu.sched_task(ctx, sched_in);
Stephane Eraniand010b332012-02-09 23:21:00 +01002210}
2211
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002212void perf_check_microcode(void)
2213{
2214 if (x86_pmu.check_microcode)
2215 x86_pmu.check_microcode();
2216}
2217EXPORT_SYMBOL_GPL(perf_check_microcode);
2218
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002219static struct pmu pmu = {
Stephane Eraniand010b332012-02-09 23:21:00 +01002220 .pmu_enable = x86_pmu_enable,
2221 .pmu_disable = x86_pmu_disable,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002222
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002223 .attr_groups = x86_pmu_attr_groups,
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002224
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002225 .event_init = x86_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002226
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002227 .event_mapped = x86_pmu_event_mapped,
2228 .event_unmapped = x86_pmu_event_unmapped,
2229
Stephane Eraniand010b332012-02-09 23:21:00 +01002230 .add = x86_pmu_add,
2231 .del = x86_pmu_del,
2232 .start = x86_pmu_start,
2233 .stop = x86_pmu_stop,
2234 .read = x86_pmu_read,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002235
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002236 .start_txn = x86_pmu_start_txn,
2237 .cancel_txn = x86_pmu_cancel_txn,
2238 .commit_txn = x86_pmu_commit_txn,
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002239
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002240 .event_idx = x86_pmu_event_idx,
Yan, Zhengba532502014-11-04 21:55:58 -05002241 .sched_task = x86_pmu_sched_task,
Yan, Zhenge18bf522014-11-04 21:56:03 -05002242 .task_ctx_size = sizeof(struct x86_perf_task_context),
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002243};
2244
Andy Lutomirskic1317ec2014-10-24 15:58:11 -07002245void arch_perf_update_userpage(struct perf_event *event,
2246 struct perf_event_mmap_page *userpg, u64 now)
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002247{
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002248 struct cyc2ns_data *data;
2249
Peter Zijlstrafa731582013-09-19 10:16:42 +02002250 userpg->cap_user_time = 0;
2251 userpg->cap_user_time_zero = 0;
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002252 userpg->cap_user_rdpmc =
2253 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
Peter Zijlstrac7206202012-03-22 17:26:36 +01002254 userpg->pmc_width = x86_pmu.cntval_bits;
2255
Peter Zijlstra35af99e2013-11-28 19:38:42 +01002256 if (!sched_clock_stable())
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002257 return;
2258
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002259 data = cyc2ns_read_begin();
2260
Peter Zijlstra34f43922015-02-20 14:05:38 +01002261 /*
2262 * Internal timekeeping for enabled/running/stopped times
2263 * is always in the local_clock domain.
2264 */
Peter Zijlstrafa731582013-09-19 10:16:42 +02002265 userpg->cap_user_time = 1;
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002266 userpg->time_mult = data->cyc2ns_mul;
2267 userpg->time_shift = data->cyc2ns_shift;
2268 userpg->time_offset = data->cyc2ns_offset - now;
Adrian Hunterc73deb62013-06-28 16:22:18 +03002269
Peter Zijlstra34f43922015-02-20 14:05:38 +01002270 /*
2271 * cap_user_time_zero doesn't make sense when we're using a different
2272 * time base for the records.
2273 */
Alexander Shishkinf454bfd2016-04-14 14:59:49 +03002274 if (!event->attr.use_clockid) {
Peter Zijlstra34f43922015-02-20 14:05:38 +01002275 userpg->cap_user_time_zero = 1;
2276 userpg->time_zero = data->cyc2ns_offset;
2277 }
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002278
2279 cyc2ns_read_end(data);
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002280}
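/*
 * With cap_user_time set, userspace is expected to convert a TSC value 'cyc'
 * to nanoseconds roughly as described for perf_event_mmap_page:
 *
 *	quot  = cyc >> time_shift;
 *	rem   = cyc & (((u64)1 << time_shift) - 1);
 *	delta = time_offset + quot * time_mult +
 *		((rem * time_mult) >> time_shift);
 */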
2281
Frederic Weisbecker56962b42010-06-30 23:03:51 +02002282void
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002283perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002284{
Josh Poimboeuf35f4d9b2016-09-16 14:18:13 -05002285 struct unwind_state state;
2286 unsigned long addr;
2287
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002288 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2289 /* TODO: We don't support guest os callchain now */
Peter Zijlstraed805262010-08-20 14:30:41 +02002290 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002291 }
2292
Josh Poimboeuf019e5792016-08-24 11:50:14 -05002293 if (perf_callchain_store(entry, regs->ip))
2294 return;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002295
Josh Poimboeuf35f4d9b2016-09-16 14:18:13 -05002296 for (unwind_start(&state, current, regs, NULL); !unwind_done(&state);
2297 unwind_next_frame(&state)) {
2298 addr = unwind_get_return_address(&state);
2299 if (!addr || perf_callchain_store(entry, addr))
2300 return;
2301 }
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002302}
2303
Arun Sharmabc6ca7b2012-04-20 15:41:35 -07002304static inline int
2305valid_user_frame(const void __user *fp, unsigned long size)
2306{
2307 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
2308}
2309
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002310static unsigned long get_segment_base(unsigned int segment)
2311{
2312 struct desc_struct *desc;
2313 int idx = segment >> 3;
2314
2315 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
Andy Lutomirskia5b9e5a2015-07-30 14:31:34 -07002316#ifdef CONFIG_MODIFY_LDT_SYSCALL
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002317 struct ldt_struct *ldt;
2318
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002319 if (idx > LDT_ENTRIES)
2320 return 0;
2321
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002322 /* IRQs are off, so this synchronizes with smp_store_release */
2323 ldt = lockless_dereference(current->active_mm->context.ldt);
2324 if (!ldt || idx > ldt->size)
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002325 return 0;
2326
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002327 desc = &ldt->entries[idx];
Andy Lutomirskia5b9e5a2015-07-30 14:31:34 -07002328#else
2329 return 0;
2330#endif
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002331 } else {
2332 if (idx > GDT_ENTRIES)
2333 return 0;
2334
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002335 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002336 }
2337
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002338 return get_desc_base(desc);
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002339}
2340
Brian Gerst10ed3492015-06-22 07:55:17 -04002341#ifdef CONFIG_IA32_EMULATION
H. Peter Anvind1a797f2012-02-19 10:06:34 -08002342
2343#include <asm/compat.h>
2344
Torok Edwin257ef9d2010-03-17 12:07:16 +02002345static inline int
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002346perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002347{
Torok Edwin257ef9d2010-03-17 12:07:16 +02002348 /* 32-bit process in 64-bit kernel. */
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002349 unsigned long ss_base, cs_base;
Torok Edwin257ef9d2010-03-17 12:07:16 +02002350 struct stack_frame_ia32 frame;
2351 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002352
Torok Edwin257ef9d2010-03-17 12:07:16 +02002353 if (!test_thread_flag(TIF_IA32))
2354 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002355
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002356 cs_base = get_segment_base(regs->cs);
2357 ss_base = get_segment_base(regs->ss);
2358
2359 fp = compat_ptr(ss_base + regs->bp);
Andi Kleen75925e12015-10-22 15:07:21 -07002360 pagefault_disable();
Arnaldo Carvalho de Melo3b1fff02016-05-10 18:08:32 -03002361 while (entry->nr < entry->max_stack) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02002362 unsigned long bytes;
2363 frame.next_frame = 0;
2364 frame.return_address = 0;
2365
Johannes Weinerae31fe52016-11-22 10:57:42 +01002366 if (!valid_user_frame(fp, sizeof(frame)))
Andi Kleen75925e12015-10-22 15:07:21 -07002367 break;
2368
2369 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
2370 if (bytes != 0)
2371 break;
2372 bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
Peter Zijlstra0a196842013-10-30 21:16:22 +01002373 if (bytes != 0)
Torok Edwin257ef9d2010-03-17 12:07:16 +02002374 break;
2375
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002376 perf_callchain_store(entry, cs_base + frame.return_address);
2377 fp = compat_ptr(ss_base + frame.next_frame);
Torok Edwin257ef9d2010-03-17 12:07:16 +02002378 }
Andi Kleen75925e12015-10-22 15:07:21 -07002379 pagefault_enable();
Torok Edwin257ef9d2010-03-17 12:07:16 +02002380 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002381}
Torok Edwin257ef9d2010-03-17 12:07:16 +02002382#else
2383static inline int
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002384perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
Torok Edwin257ef9d2010-03-17 12:07:16 +02002385{
2386 return 0;
2387}
2388#endif
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002389
Frederic Weisbecker56962b42010-06-30 23:03:51 +02002390void
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002391perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002392{
2393 struct stack_frame frame;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002394 const unsigned long __user *fp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002395
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002396 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2397 /* TODO: We don't support guest os callchain now */
Peter Zijlstraed805262010-08-20 14:30:41 +02002398 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002399 }
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002400
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002401 /*
 2402	 * We don't know what to do with VM86 stacks; ignore them for now.
2403 */
2404 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
2405 return;
2406
Josh Poimboeuffc188222016-07-01 23:02:05 -05002407 fp = (unsigned long __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002408
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02002409 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002410
Andrey Vagin20afc602011-08-30 12:32:36 +04002411 if (!current->mm)
2412 return;
2413
Torok Edwin257ef9d2010-03-17 12:07:16 +02002414 if (perf_callchain_user32(regs, entry))
2415 return;
2416
Andi Kleen75925e12015-10-22 15:07:21 -07002417 pagefault_disable();
Arnaldo Carvalho de Melo3b1fff02016-05-10 18:08:32 -03002418 while (entry->nr < entry->max_stack) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02002419 unsigned long bytes;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002420
Ingo Molnar038e8362009-06-15 09:57:59 +02002421 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002422 frame.return_address = 0;
2423
Johannes Weinerae31fe52016-11-22 10:57:42 +01002424 if (!valid_user_frame(fp, sizeof(frame)))
Andi Kleen75925e12015-10-22 15:07:21 -07002425 break;
2426
Josh Poimboeuffc188222016-07-01 23:02:05 -05002427 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
Andi Kleen75925e12015-10-22 15:07:21 -07002428 if (bytes != 0)
2429 break;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002430 bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
Peter Zijlstra0a196842013-10-30 21:16:22 +01002431 if (bytes != 0)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002432 break;
2433
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02002434 perf_callchain_store(entry, frame.return_address);
Andi Kleen75925e12015-10-22 15:07:21 -07002435 fp = (void __user *)frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002436 }
Andi Kleen75925e12015-10-22 15:07:21 -07002437 pagefault_enable();
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002438}
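/*
 * This walk simply follows the user frame-pointer chain (regs->bp ->
 * frame.next_frame), so it only produces useful callchains for user code
 * compiled with frame pointers.
 */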
2439
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002440/*
2441 * Deal with code segment offsets for the various execution modes:
2442 *
2443 * VM86 - the good olde 16 bit days, where the linear address is
2444 * 20 bits and we use regs->ip + 0x10 * regs->cs.
2445 *
2446 * IA32 - Where we need to look at GDT/LDT segment descriptor tables
2447 * to figure out what the 32bit base address is.
2448 *
2449 * X32 - has TIF_X32 set, but is running in x86_64
2450 *
2451 * X86_64 - CS,DS,SS,ES are all zero based.
2452 */
2453static unsigned long code_segment_base(struct pt_regs *regs)
2454{
2455 /*
Andy Lutomirski383f3af2015-03-18 18:33:30 -07002456 * For IA32 we look at the GDT/LDT segment base to convert the
2457 * effective IP to a linear address.
2458 */
2459
2460#ifdef CONFIG_X86_32
2461 /*
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002462 * If we are in VM86 mode, add the segment offset to convert to a
2463 * linear address.
2464 */
2465 if (regs->flags & X86_VM_MASK)
2466 return 0x10 * regs->cs;
2467
Ingo Molnar55474c42015-03-29 11:02:34 +02002468 if (user_mode(regs) && regs->cs != __USER_CS)
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002469 return get_segment_base(regs->cs);
2470#else
Andy Lutomirskic56716a2015-03-18 18:33:28 -07002471 if (user_mode(regs) && !user_64bit_mode(regs) &&
2472 regs->cs != __USER32_CS)
2473 return get_segment_base(regs->cs);
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002474#endif
2475 return 0;
2476}
2477
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002478unsigned long perf_instruction_pointer(struct pt_regs *regs)
2479{
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002480 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002481 return perf_guest_cbs->get_guest_ip();
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002482
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002483 return regs->ip + code_segment_base(regs);
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002484}
2485
2486unsigned long perf_misc_flags(struct pt_regs *regs)
2487{
2488 int misc = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002489
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002490 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002491 if (perf_guest_cbs->is_user_mode())
2492 misc |= PERF_RECORD_MISC_GUEST_USER;
2493 else
2494 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
2495 } else {
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002496 if (user_mode(regs))
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002497 misc |= PERF_RECORD_MISC_USER;
2498 else
2499 misc |= PERF_RECORD_MISC_KERNEL;
2500 }
2501
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002502 if (regs->flags & PERF_EFLAGS_EXACT)
Peter Zijlstraab608342010-04-08 23:03:20 +02002503 misc |= PERF_RECORD_MISC_EXACT_IP;
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002504
2505 return misc;
2506}
Gleb Natapovb3d94682011-11-10 14:57:27 +02002507
2508void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
2509{
2510 cap->version = x86_pmu.version;
2511 cap->num_counters_gp = x86_pmu.num_counters;
2512 cap->num_counters_fixed = x86_pmu.num_counters_fixed;
2513 cap->bit_width_gp = x86_pmu.cntval_bits;
2514 cap->bit_width_fixed = x86_pmu.cntval_bits;
2515 cap->events_mask = (unsigned int)x86_pmu.events_maskl;
2516 cap->events_mask_len = x86_pmu.events_mask_len;
2517}
2518EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);