/*
 * Performance events x86 architecture code
 *
 *  Copyright (C) 2008 Thomas Gleixner <tglx@linutronix.de>
 *  Copyright (C) 2008-2009 Red Hat, Inc., Ingo Molnar
 *  Copyright (C) 2009 Jaswinder Singh Rajput
 *  Copyright (C) 2009 Advanced Micro Devices, Inc., Robert Richter
 *  Copyright (C) 2008-2009 Red Hat, Inc., Peter Zijlstra
 *  Copyright (C) 2009 Intel Corporation, <markus.t.metzger@intel.com>
 *  Copyright (C) 2009 Google, Inc., Stephane Eranian
 *
 *  For licencing details see kernel-base/COPYING
 */

#include <linux/perf_event.h>
#include <linux/capability.h>
#include <linux/notifier.h>
#include <linux/hardirq.h>
#include <linux/kprobes.h>
#include <linux/export.h>
#include <linux/init.h>
#include <linux/kdebug.h>
#include <linux/sched.h>
#include <linux/uaccess.h>
#include <linux/slab.h>
#include <linux/cpu.h>
#include <linux/bitops.h>
#include <linux/device.h>

#include <asm/apic.h>
#include <asm/stacktrace.h>
#include <asm/nmi.h>
#include <asm/smp.h>
#include <asm/alternative.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/timer.h>
#include <asm/desc.h>
#include <asm/ldt.h>
#include <asm/unwind.h>

#include "perf_event.h"

struct x86_pmu x86_pmu __read_mostly;

DEFINE_PER_CPU(struct cpu_hw_events, cpu_hw_events) = {
	.enabled = 1,
};

struct static_key rdpmc_always_available = STATIC_KEY_INIT_FALSE;

u64 __read_mostly hw_cache_event_ids
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];
u64 __read_mostly hw_cache_extra_regs
				[PERF_COUNT_HW_CACHE_MAX]
				[PERF_COUNT_HW_CACHE_OP_MAX]
				[PERF_COUNT_HW_CACHE_RESULT_MAX];

/*
 * Propagate event elapsed time into the generic event.
 * Can only be executed on the CPU where the event is active.
 * Returns the delta events processed.
 */
u64 x86_perf_event_update(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	int shift = 64 - x86_pmu.cntval_bits;
	u64 prev_raw_count, new_raw_count;
	int idx = hwc->idx;
	u64 delta;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * Careful: an NMI might modify the previous event value.
	 *
	 * Our tactic to handle this is to first atomically read and
	 * exchange a new raw count - then add that new-prev delta
	 * count to the generic event atomically:
	 */
again:
	prev_raw_count = local64_read(&hwc->prev_count);
	rdpmcl(hwc->event_base_rdpmc, new_raw_count);

	if (local64_cmpxchg(&hwc->prev_count, prev_raw_count,
					new_raw_count) != prev_raw_count)
		goto again;

	/*
	 * Now we have the new raw value and have updated the prev
	 * timestamp already. We can now calculate the elapsed delta
	 * (event-)time and add that to the generic event.
	 *
	 * Careful, not all hw sign-extends above the physical width
	 * of the count.
	 */
	delta = (new_raw_count << shift) - (prev_raw_count << shift);
	delta >>= shift;

	local64_add(delta, &event->count);
	local64_sub(delta, &hwc->period_left);

	return new_raw_count;
}

/*
 * Find and validate any extra registers to set up.
 */
static int x86_pmu_extra_regs(u64 config, struct perf_event *event)
{
	struct hw_perf_event_extra *reg;
	struct extra_reg *er;

	reg = &event->hw.extra_reg;

	if (!x86_pmu.extra_regs)
		return 0;

	for (er = x86_pmu.extra_regs; er->msr; er++) {
		if (er->event != (config & er->config_mask))
			continue;
		if (event->attr.config1 & ~er->valid_mask)
			return -EINVAL;
		/* Check if the extra msrs can be safely accessed */
		if (!er->extra_msr_access)
			return -ENXIO;

		reg->idx = er->idx;
		reg->config = event->attr.config1;
		reg->reg = er->msr;
		break;
	}
	return 0;
}

static atomic_t active_events;
static atomic_t pmc_refcount;
static DEFINE_MUTEX(pmc_reserve_mutex);

#ifdef CONFIG_X86_LOCAL_APIC

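/*
 * Reserve all counter and event-select MSRs through the perfctr-watchdog
 * reservation API, releasing whatever was already claimed if any single
 * reservation fails.
 */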
static bool reserve_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_perfctr_nmi(x86_pmu_event_addr(i)))
			goto perfctr_fail;
	}

	for (i = 0; i < x86_pmu.num_counters; i++) {
		if (!reserve_evntsel_nmi(x86_pmu_config_addr(i)))
			goto eventsel_fail;
	}

	return true;

eventsel_fail:
	for (i--; i >= 0; i--)
		release_evntsel_nmi(x86_pmu_config_addr(i));

	i = x86_pmu.num_counters;

perfctr_fail:
	for (i--; i >= 0; i--)
		release_perfctr_nmi(x86_pmu_event_addr(i));

	return false;
}

static void release_pmc_hardware(void)
{
	int i;

	for (i = 0; i < x86_pmu.num_counters; i++) {
		release_perfctr_nmi(x86_pmu_event_addr(i));
		release_evntsel_nmi(x86_pmu_config_addr(i));
	}
}

#else

static bool reserve_pmc_hardware(void) { return true; }
static void release_pmc_hardware(void) {}

#endif

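/*
 * Sanity-check the PMU: complain if the BIOS left counters enabled, and
 * verify that a counter MSR can actually be written and read back, which
 * catches emulated PMUs (e.g. qemu/kvm) that silently ignore the MSRs.
 */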
static bool check_hw_exists(void)
{
	u64 val, val_fail, val_new = ~0;
	int i, reg, reg_fail, ret = 0;
	int bios_fail = 0;
	int reg_safe = -1;

	/*
	 * Check to see if the BIOS enabled any of the counters, if so
	 * complain and bail.
	 */
	for (i = 0; i < x86_pmu.num_counters; i++) {
		reg = x86_pmu_config_addr(i);
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		if (val & ARCH_PERFMON_EVENTSEL_ENABLE) {
			bios_fail = 1;
			val_fail = val;
			reg_fail = reg;
		} else {
			reg_safe = i;
		}
	}

	if (x86_pmu.num_counters_fixed) {
		reg = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		ret = rdmsrl_safe(reg, &val);
		if (ret)
			goto msr_fail;
		for (i = 0; i < x86_pmu.num_counters_fixed; i++) {
			if (val & (0x03 << i*4)) {
				bios_fail = 1;
				val_fail = val;
				reg_fail = reg;
			}
		}
	}

	/*
	 * If all the counters are enabled, the below test will always
	 * fail. The tools will also become useless in this scenario.
	 * Just fail and disable the hardware counters.
	 */

	if (reg_safe == -1) {
		reg = reg_safe;
		goto msr_fail;
	}

	/*
	 * Read the current value, change it and read it back to see if it
	 * matches, this is needed to detect certain hardware emulators
	 * (qemu/kvm) that don't trap on the MSR access and always return 0s.
	 */
	reg = x86_pmu_event_addr(reg_safe);
	if (rdmsrl_safe(reg, &val))
		goto msr_fail;
	val ^= 0xffffUL;
	ret = wrmsrl_safe(reg, val);
	ret |= rdmsrl_safe(reg, &val_new);
	if (ret || val != val_new)
		goto msr_fail;

	/*
	 * We still allow the PMU driver to operate:
	 */
	if (bios_fail) {
		pr_cont("Broken BIOS detected, complain to your hardware vendor.\n");
		pr_err(FW_BUG "the BIOS has corrupted hw-PMU resources (MSR %x is %Lx)\n",
		       reg_fail, val_fail);
	}

	return true;

msr_fail:
	if (boot_cpu_has(X86_FEATURE_HYPERVISOR)) {
		pr_cont("PMU not available due to virtualization, using software events only.\n");
	} else {
		pr_cont("Broken PMU hardware detected, using software events only.\n");
		pr_err("Failed to access perfctr msr (MSR %x is %Lx)\n",
		       reg, val_new);
	}

	return false;
}

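/* Drop the hardware reservation and the active-events count on teardown. */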
static void hw_perf_event_destroy(struct perf_event *event)
{
	x86_release_hardware();
	atomic_dec(&active_events);
}

void hw_perf_lbr_event_destroy(struct perf_event *event)
{
	hw_perf_event_destroy(event);

	/* undo the lbr/bts event accounting */
	x86_del_exclusive(x86_lbr_exclusive_lbr);
}

static inline int x86_pmu_initialized(void)
{
	return x86_pmu.handle_irq != NULL;
}

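/*
 * Decode a PERF_TYPE_HW_CACHE config: bits 0-7 select the cache, bits
 * 8-15 the operation and bits 16-23 the result, each indexing into the
 * hw_cache_event_ids / hw_cache_extra_regs tables.
 */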
static inline int
set_ext_hw_attr(struct hw_perf_event *hwc, struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	unsigned int cache_type, cache_op, cache_result;
	u64 config, val;

	config = attr->config;

	cache_type = (config >> 0) & 0xff;
	if (cache_type >= PERF_COUNT_HW_CACHE_MAX)
		return -EINVAL;

	cache_op = (config >> 8) & 0xff;
	if (cache_op >= PERF_COUNT_HW_CACHE_OP_MAX)
		return -EINVAL;

	cache_result = (config >> 16) & 0xff;
	if (cache_result >= PERF_COUNT_HW_CACHE_RESULT_MAX)
		return -EINVAL;

	val = hw_cache_event_ids[cache_type][cache_op][cache_result];

	if (val == 0)
		return -ENOENT;

	if (val == -1)
		return -EINVAL;

	hwc->config |= val;
	attr->config1 = hw_cache_extra_regs[cache_type][cache_op][cache_result];
	return x86_pmu_extra_regs(val, event);
}

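/*
 * Refcounted wrapper around reserve_pmc_hardware(): the first caller
 * claims the counter MSRs and the DS buffers, later callers only take
 * a reference on pmc_refcount.
 */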
int x86_reserve_hardware(void)
{
	int err = 0;

	if (!atomic_inc_not_zero(&pmc_refcount)) {
		mutex_lock(&pmc_reserve_mutex);
		if (atomic_read(&pmc_refcount) == 0) {
			if (!reserve_pmc_hardware())
				err = -EBUSY;
			else
				reserve_ds_buffers();
		}
		if (!err)
			atomic_inc(&pmc_refcount);
		mutex_unlock(&pmc_reserve_mutex);
	}

	return err;
}

void x86_release_hardware(void)
{
	if (atomic_dec_and_mutex_lock(&pmc_refcount, &pmc_reserve_mutex)) {
		release_pmc_hardware();
		release_ds_buffers();
		mutex_unlock(&pmc_reserve_mutex);
	}
}

/*
 * Check if we can create an event of a certain type (i.e. that no
 * conflicting events are present).
 */
int x86_add_exclusive(unsigned int what)
{
	int i;

	/*
	 * When lbr_pt_coexist we allow PT to coexist with either LBR or BTS.
	 * LBR and BTS are still mutually exclusive.
	 */
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		return 0;

	if (!atomic_inc_not_zero(&x86_pmu.lbr_exclusive[what])) {
		mutex_lock(&pmc_reserve_mutex);
		for (i = 0; i < ARRAY_SIZE(x86_pmu.lbr_exclusive); i++) {
			if (i != what && atomic_read(&x86_pmu.lbr_exclusive[i]))
				goto fail_unlock;
		}
		atomic_inc(&x86_pmu.lbr_exclusive[what]);
		mutex_unlock(&pmc_reserve_mutex);
	}

	atomic_inc(&active_events);
	return 0;

fail_unlock:
	mutex_unlock(&pmc_reserve_mutex);
	return -EBUSY;
}

void x86_del_exclusive(unsigned int what)
{
	if (x86_pmu.lbr_pt_coexist && what == x86_lbr_exclusive_pt)
		return;

	atomic_dec(&x86_pmu.lbr_exclusive[what]);
	atomic_dec(&active_events);
}

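/*
 * Translate the event attributes (raw, hw-cache or generic) into a
 * hardware config value, setting up the sampling period and the BTS
 * branch-tracing special case along the way.
 */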
int x86_setup_perfctr(struct perf_event *event)
{
	struct perf_event_attr *attr = &event->attr;
	struct hw_perf_event *hwc = &event->hw;
	u64 config;

	if (!is_sampling_event(event)) {
		hwc->sample_period = x86_pmu.max_period;
		hwc->last_period = hwc->sample_period;
		local64_set(&hwc->period_left, hwc->sample_period);
	}

	if (attr->type == PERF_TYPE_RAW)
		return x86_pmu_extra_regs(event->attr.config, event);

	if (attr->type == PERF_TYPE_HW_CACHE)
		return set_ext_hw_attr(hwc, event);

	if (attr->config >= x86_pmu.max_events)
		return -EINVAL;

	/*
	 * The generic map:
	 */
	config = x86_pmu.event_map(attr->config);

	if (config == 0)
		return -ENOENT;

	if (config == -1LL)
		return -EINVAL;

	/*
	 * Branch tracing:
	 */
	if (attr->config == PERF_COUNT_HW_BRANCH_INSTRUCTIONS &&
	    !attr->freq && hwc->sample_period == 1) {
		/* BTS is not supported by this architecture. */
		if (!x86_pmu.bts_active)
			return -EOPNOTSUPP;

		/* BTS is currently only allowed for user-mode. */
		if (!attr->exclude_kernel)
			return -EOPNOTSUPP;

		/* disallow bts if conflicting events are present */
		if (x86_add_exclusive(x86_lbr_exclusive_lbr))
			return -EBUSY;

		event->destroy = hw_perf_lbr_event_destroy;
	}

	hwc->config |= config;

	return 0;
}

/*
 * check that branch_sample_type is compatible with
 * settings needed for precise_ip > 1, which implies
 * using the LBR to capture ALL taken branches at the
 * priv levels of the measurement
 */
static inline int precise_br_compat(struct perf_event *event)
{
	u64 m = event->attr.branch_sample_type;
	u64 b = 0;

	/* must capture all branches */
	if (!(m & PERF_SAMPLE_BRANCH_ANY))
		return 0;

	m &= PERF_SAMPLE_BRANCH_KERNEL | PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_user)
		b |= PERF_SAMPLE_BRANCH_USER;

	if (!event->attr.exclude_kernel)
		b |= PERF_SAMPLE_BRANCH_KERNEL;

	/*
	 * ignore PERF_SAMPLE_BRANCH_HV, not supported on x86
	 */

	return m == b;
}

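/*
 * Validate the precise_ip and branch-stack attributes against hardware
 * capabilities and build the base event-select value (interrupt enable
 * plus the USR/OS bits).
 */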
int x86_pmu_hw_config(struct perf_event *event)
{
	if (event->attr.precise_ip) {
		int precise = 0;

		/* Support for constant skid */
		if (x86_pmu.pebs_active && !x86_pmu.pebs_broken) {
			precise++;

			/* Support for IP fixup */
			if (x86_pmu.lbr_nr || x86_pmu.intel_cap.pebs_format >= 2)
				precise++;

			if (x86_pmu.pebs_prec_dist)
				precise++;
		}

		if (event->attr.precise_ip > precise)
			return -EOPNOTSUPP;
	}
	/*
	 * check that PEBS LBR correction does not conflict with
	 * whatever the user is asking with attr->branch_sample_type
	 */
	if (event->attr.precise_ip > 1 && x86_pmu.intel_cap.pebs_format < 2) {
		u64 *br_type = &event->attr.branch_sample_type;

		if (has_branch_stack(event)) {
			if (!precise_br_compat(event))
				return -EOPNOTSUPP;

			/* branch_sample_type is compatible */

		} else {
			/*
			 * user did not specify branch_sample_type
			 *
			 * For PEBS fixups, we capture all
			 * the branches at the priv level of the
			 * event.
			 */
			*br_type = PERF_SAMPLE_BRANCH_ANY;

			if (!event->attr.exclude_user)
				*br_type |= PERF_SAMPLE_BRANCH_USER;

			if (!event->attr.exclude_kernel)
				*br_type |= PERF_SAMPLE_BRANCH_KERNEL;
		}
	}

	if (event->attr.branch_sample_type & PERF_SAMPLE_BRANCH_CALL_STACK)
		event->attach_state |= PERF_ATTACH_TASK_DATA;

	/*
	 * Generate PMC IRQs:
	 * (keep 'enabled' bit clear for now)
	 */
	event->hw.config = ARCH_PERFMON_EVENTSEL_INT;

	/*
	 * Count user and OS events unless requested not to
	 */
	if (!event->attr.exclude_user)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_USR;
	if (!event->attr.exclude_kernel)
		event->hw.config |= ARCH_PERFMON_EVENTSEL_OS;

	if (event->attr.type == PERF_TYPE_RAW)
		event->hw.config |= event->attr.config & X86_RAW_EVENT_MASK;

	if (event->attr.sample_period && x86_pmu.limit_period) {
		if (x86_pmu.limit_period(event, event->attr.sample_period) >
				event->attr.sample_period)
			return -EINVAL;
	}

	return x86_setup_perfctr(event);
}

/*
 * Setup the hardware configuration for a given attr_type
 */
static int __x86_pmu_event_init(struct perf_event *event)
{
	int err;

	if (!x86_pmu_initialized())
		return -ENODEV;

	err = x86_reserve_hardware();
	if (err)
		return err;

	atomic_inc(&active_events);
	event->destroy = hw_perf_event_destroy;

	event->hw.idx = -1;
	event->hw.last_cpu = -1;
	event->hw.last_tag = ~0ULL;

	/* mark unused */
	event->hw.extra_reg.idx = EXTRA_REG_NONE;
	event->hw.branch_reg.idx = EXTRA_REG_NONE;

	return x86_pmu.hw_config(event);
}

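/* Clear the enable bit of every counter that is active on this CPU. */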
void x86_pmu_disable_all(void)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		u64 val;

		if (!test_bit(idx, cpuc->active_mask))
			continue;
		rdmsrl(x86_pmu_config_addr(idx), val);
		if (!(val & ARCH_PERFMON_EVENTSEL_ENABLE))
			continue;
		val &= ~ARCH_PERFMON_EVENTSEL_ENABLE;
		wrmsrl(x86_pmu_config_addr(idx), val);
	}
}

/*
 * A PMI may land after enabled=0; it can hit either before or after
 * disable_all.
 *
 * If the PMI hits before disable_all, the PMU will be disabled in the NMI
 * handler. It will not be re-enabled in the NMI handler again, because
 * enabled=0. After handling the NMI, disable_all will be called, which will
 * not change the state either. If the PMI hits after disable_all, the PMU
 * is already disabled before entering the NMI handler. The NMI handler will
 * not change the state either.
 *
 * So either situation is harmless.
 */
static void x86_pmu_disable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);

	if (!x86_pmu_initialized())
		return;

	if (!cpuc->enabled)
		return;

	cpuc->n_added = 0;
	cpuc->enabled = 0;
	barrier();

	x86_pmu.disable_all();
}

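/* Re-enable every counter that is marked active on this CPU. */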
void x86_pmu_enable_all(int added)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	int idx;

	for (idx = 0; idx < x86_pmu.num_counters; idx++) {
		struct hw_perf_event *hwc = &cpuc->events[idx]->hw;

		if (!test_bit(idx, cpuc->active_mask))
			continue;

		__x86_pmu_enable_event(hwc, ARCH_PERFMON_EVENTSEL_ENABLE);
	}
}

static struct pmu pmu;

static inline int is_x86_event(struct perf_event *event)
{
	return event->pmu == &pmu;
}

/*
 * Event scheduler state:
 *
 * Assign events iterating over all events and counters, beginning
 * with events with least weights first. Keep the current iterator
 * state in struct sched_state.
 */
struct sched_state {
	int	weight;
	int	event;		/* event index */
	int	counter;	/* counter index */
	int	unassigned;	/* number of events to be assigned left */
	int	nr_gp;		/* number of GP counters used */
	unsigned long used[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
};

/* Total max is X86_PMC_IDX_MAX, but we are O(n!) limited */
#define SCHED_STATES_MAX	2

struct perf_sched {
	int			max_weight;
	int			max_events;
	int			max_gp;
	int			saved_states;
	struct event_constraint	**constraints;
	struct sched_state	state;
	struct sched_state	saved[SCHED_STATES_MAX];
};

/*
 * Initialize the iterator that runs through all events and counters.
 */
static void perf_sched_init(struct perf_sched *sched, struct event_constraint **constraints,
			    int num, int wmin, int wmax, int gpmax)
{
	int idx;

	memset(sched, 0, sizeof(*sched));
	sched->max_events	= num;
	sched->max_weight	= wmax;
	sched->max_gp		= gpmax;
	sched->constraints	= constraints;

	for (idx = 0; idx < num; idx++) {
		if (constraints[idx]->weight == wmin)
			break;
	}

	sched->state.event	= idx;		/* start with min weight */
	sched->state.weight	= wmin;
	sched->state.unassigned	= num;
}

static void perf_sched_save_state(struct perf_sched *sched)
{
	if (WARN_ON_ONCE(sched->saved_states >= SCHED_STATES_MAX))
		return;

	sched->saved[sched->saved_states] = sched->state;
	sched->saved_states++;
}

static bool perf_sched_restore_state(struct perf_sched *sched)
{
	if (!sched->saved_states)
		return false;

	sched->saved_states--;
	sched->state = sched->saved[sched->saved_states];

	/* continue with next counter: */
	clear_bit(sched->state.counter++, sched->state.used);

	return true;
}

/*
 * Select a counter for the current event to schedule. Return true on
 * success.
 */
static bool __perf_sched_find_counter(struct perf_sched *sched)
{
	struct event_constraint *c;
	int idx;

	if (!sched->state.unassigned)
		return false;

	if (sched->state.event >= sched->max_events)
		return false;

	c = sched->constraints[sched->state.event];
	/* Prefer fixed purpose counters */
	if (c->idxmsk64 & (~0ULL << INTEL_PMC_IDX_FIXED)) {
		idx = INTEL_PMC_IDX_FIXED;
		for_each_set_bit_from(idx, c->idxmsk, X86_PMC_IDX_MAX) {
			if (!__test_and_set_bit(idx, sched->state.used))
				goto done;
		}
	}

	/* Grab the first unused counter starting with idx */
	idx = sched->state.counter;
	for_each_set_bit_from(idx, c->idxmsk, INTEL_PMC_IDX_FIXED) {
		if (!__test_and_set_bit(idx, sched->state.used)) {
			if (sched->state.nr_gp++ >= sched->max_gp)
				return false;

			goto done;
		}
	}

	return false;

done:
	sched->state.counter = idx;

	if (c->overlap)
		perf_sched_save_state(sched);

	return true;
}

static bool perf_sched_find_counter(struct perf_sched *sched)
{
	while (!__perf_sched_find_counter(sched)) {
		if (!perf_sched_restore_state(sched))
			return false;
	}

	return true;
}

/*
 * Go through all unassigned events and find the next one to schedule.
 * Take events with the least weight first. Return true on success.
 */
static bool perf_sched_next_event(struct perf_sched *sched)
{
	struct event_constraint *c;

	if (!sched->state.unassigned || !--sched->state.unassigned)
		return false;

	do {
		/* next event */
		sched->state.event++;
		if (sched->state.event >= sched->max_events) {
			/* next weight */
			sched->state.event = 0;
			sched->state.weight++;
			if (sched->state.weight > sched->max_weight)
				return false;
		}
		c = sched->constraints[sched->state.event];
	} while (c->weight != sched->state.weight);

	sched->state.counter = 0;	/* start with first counter */

	return true;
}

/*
 * Assign a counter for each event.
 */
int perf_assign_events(struct event_constraint **constraints, int n,
			int wmin, int wmax, int gpmax, int *assign)
{
	struct perf_sched sched;

	perf_sched_init(&sched, constraints, n, wmin, wmax, gpmax);

	do {
		if (!perf_sched_find_counter(&sched))
			break;	/* failed */
		if (assign)
			assign[sched.state.event] = sched.state.counter;
	} while (perf_sched_next_event(&sched));

	return sched.state.unassigned;
}
EXPORT_SYMBOL_GPL(perf_assign_events);

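/*
 * Schedule the n collected events onto hardware counters: first try the
 * fastpath that reuses each event's previous counter, then fall back to
 * the full constraint solver in perf_assign_events().
 */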
int x86_schedule_events(struct cpu_hw_events *cpuc, int n, int *assign)
{
	struct event_constraint *c;
	unsigned long used_mask[BITS_TO_LONGS(X86_PMC_IDX_MAX)];
	struct perf_event *e;
	int i, wmin, wmax, unsched = 0;
	struct hw_perf_event *hwc;

	bitmap_zero(used_mask, X86_PMC_IDX_MAX);

	if (x86_pmu.start_scheduling)
		x86_pmu.start_scheduling(cpuc);

	for (i = 0, wmin = X86_PMC_IDX_MAX, wmax = 0; i < n; i++) {
		cpuc->event_constraint[i] = NULL;
		c = x86_pmu.get_event_constraints(cpuc, i, cpuc->event_list[i]);
		cpuc->event_constraint[i] = c;

		wmin = min(wmin, c->weight);
		wmax = max(wmax, c->weight);
	}

	/*
	 * fastpath, try to reuse previous register
	 */
	for (i = 0; i < n; i++) {
		hwc = &cpuc->event_list[i]->hw;
		c = cpuc->event_constraint[i];

		/* never assigned */
		if (hwc->idx == -1)
			break;

		/* constraint still honored */
		if (!test_bit(hwc->idx, c->idxmsk))
			break;

		/* not already used */
		if (test_bit(hwc->idx, used_mask))
			break;

		__set_bit(hwc->idx, used_mask);
		if (assign)
			assign[i] = hwc->idx;
	}

	/* slow path */
	if (i != n) {
		int gpmax = x86_pmu.num_counters;

		/*
		 * Do not allow scheduling of more than half the available
		 * generic counters.
		 *
		 * This helps avoid counter starvation of the sibling thread
		 * by ensuring at most half the counters cannot be in
		 * exclusive mode. There are no designated counters for the
		 * limits; any N/2 counters can be used. This helps with
		 * events with specific counter constraints.
		 */
		if (is_ht_workaround_enabled() && !cpuc->is_fake &&
		    READ_ONCE(cpuc->excl_cntrs->exclusive_present))
			gpmax /= 2;

		unsched = perf_assign_events(cpuc->event_constraint, n, wmin,
					     wmax, gpmax, assign);
	}

	/*
	 * In case of success (unsched = 0), mark events as committed,
	 * so we do not put_constraint() in case new events are added
	 * and fail to be scheduled
	 *
	 * We invoke the lower level commit callback to lock the resource
	 *
	 * We do not need to do all of this in case we are called to
	 * validate an event group (assign == NULL)
	 */
	if (!unsched && assign) {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			e->hw.flags |= PERF_X86_EVENT_COMMITTED;
			if (x86_pmu.commit_scheduling)
				x86_pmu.commit_scheduling(cpuc, i, assign[i]);
		}
	} else {
		for (i = 0; i < n; i++) {
			e = cpuc->event_list[i];
			/*
			 * do not put_constraint() on committed events,
			 * because they are good to go
			 */
			if ((e->hw.flags & PERF_X86_EVENT_COMMITTED))
				continue;

			/*
			 * release events that failed scheduling
			 */
			if (x86_pmu.put_event_constraints)
				x86_pmu.put_event_constraints(cpuc, e);
		}
	}

	if (x86_pmu.stop_scheduling)
		x86_pmu.stop_scheduling(cpuc);

	return unsched ? -EINVAL : 0;
}

/*
 * dogrp: true if we must collect sibling events (group)
 * returns total number of events and error code
 */
static int collect_events(struct cpu_hw_events *cpuc, struct perf_event *leader, bool dogrp)
{
	struct perf_event *event;
	int n, max_count;

	max_count = x86_pmu.num_counters + x86_pmu.num_counters_fixed;

	/* current number of events already accepted */
	n = cpuc->n_events;

	if (is_x86_event(leader)) {
		if (n >= max_count)
			return -EINVAL;
		cpuc->event_list[n] = leader;
		n++;
	}
	if (!dogrp)
		return n;

	list_for_each_entry(event, &leader->sibling_list, group_entry) {
		if (!is_x86_event(event) ||
		    event->state <= PERF_EVENT_STATE_OFF)
			continue;

		if (n >= max_count)
			return -EINVAL;

		cpuc->event_list[n] = event;
		n++;
	}
	return n;
}

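/*
 * Program the event's MSR addresses (config, counter base and rdpmc
 * index) according to the counter it was assigned: BTS, fixed or
 * general purpose.
 */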
static inline void x86_assign_hw_event(struct perf_event *event,
				struct cpu_hw_events *cpuc, int i)
{
	struct hw_perf_event *hwc = &event->hw;

	hwc->idx = cpuc->assign[i];
	hwc->last_cpu = smp_processor_id();
	hwc->last_tag = ++cpuc->tags[i];

	if (hwc->idx == INTEL_PMC_IDX_FIXED_BTS) {
		hwc->config_base = 0;
		hwc->event_base	= 0;
	} else if (hwc->idx >= INTEL_PMC_IDX_FIXED) {
		hwc->config_base = MSR_ARCH_PERFMON_FIXED_CTR_CTRL;
		hwc->event_base = MSR_ARCH_PERFMON_FIXED_CTR0 + (hwc->idx - INTEL_PMC_IDX_FIXED);
		hwc->event_base_rdpmc = (hwc->idx - INTEL_PMC_IDX_FIXED) | 1<<30;
	} else {
		hwc->config_base = x86_pmu_config_addr(hwc->idx);
		hwc->event_base  = x86_pmu_event_addr(hwc->idx);
		hwc->event_base_rdpmc = x86_pmu_rdpmc_index(hwc->idx);
	}
}

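/*
 * True if the event can keep the counter it used last time: same
 * assignment, same CPU, and nobody else touched the counter since.
 */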
static inline int match_prev_assignment(struct hw_perf_event *hwc,
					struct cpu_hw_events *cpuc,
					int i)
{
	return hwc->idx == cpuc->assign[i] &&
		hwc->last_cpu == smp_processor_id() &&
		hwc->last_tag == cpuc->tags[i];
}

static void x86_pmu_start(struct perf_event *event, int flags);

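/*
 * Re-enable the PMU after x86_pmu_disable(): reprogram any events that
 * moved to a new counter while the PMU was off, then set the global
 * enable bits.
 */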
static void x86_pmu_enable(struct pmu *pmu)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct perf_event *event;
	struct hw_perf_event *hwc;
	int i, added = cpuc->n_added;

	if (!x86_pmu_initialized())
		return;

	if (cpuc->enabled)
		return;

	if (cpuc->n_added) {
		int n_running = cpuc->n_events - cpuc->n_added;
		/*
		 * apply assignment obtained either from
		 * hw_perf_group_sched_in() or x86_pmu_enable()
		 *
		 * step1: save events moving to new counters
		 */
		for (i = 0; i < n_running; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			/*
			 * we can avoid reprogramming counter if:
			 * - assigned same counter as last time
			 * - running on same CPU as last time
			 * - no other event has used the counter since
			 */
			if (hwc->idx == -1 ||
			    match_prev_assignment(hwc, cpuc, i))
				continue;

			/*
			 * Ensure we don't accidentally enable a stopped
			 * counter simply because we rescheduled.
			 */
			if (hwc->state & PERF_HES_STOPPED)
				hwc->state |= PERF_HES_ARCH;

			x86_pmu_stop(event, PERF_EF_UPDATE);
		}

		/*
		 * step2: reprogram moved events into new counters
		 */
		for (i = 0; i < cpuc->n_events; i++) {
			event = cpuc->event_list[i];
			hwc = &event->hw;

			if (!match_prev_assignment(hwc, cpuc, i))
				x86_assign_hw_event(event, cpuc, i);
			else if (i < n_running)
				continue;

			if (hwc->state & PERF_HES_ARCH)
				continue;

			x86_pmu_start(event, PERF_EF_RELOAD);
		}
		cpuc->n_added = 0;
		perf_events_lapic_init();
	}

	cpuc->enabled = 1;
	barrier();

	x86_pmu.enable_all(added);
}

static DEFINE_PER_CPU(u64 [X86_PMC_IDX_MAX], pmc_prev_left);

/*
 * Set the next IRQ period, based on the hwc->period_left value.
 * To be called with the event disabled in hw:
 */
int x86_perf_event_set_period(struct perf_event *event)
{
	struct hw_perf_event *hwc = &event->hw;
	s64 left = local64_read(&hwc->period_left);
	s64 period = hwc->sample_period;
	int ret = 0, idx = hwc->idx;

	if (idx == INTEL_PMC_IDX_FIXED_BTS)
		return 0;

	/*
	 * If we are way outside a reasonable range then just skip forward:
	 */
	if (unlikely(left <= -period)) {
		left = period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}

	if (unlikely(left <= 0)) {
		left += period;
		local64_set(&hwc->period_left, left);
		hwc->last_period = period;
		ret = 1;
	}
	/*
	 * Quirk: certain CPUs don't like it if just 1 hw_event is left:
	 */
	if (unlikely(left < 2))
		left = 2;

	if (left > x86_pmu.max_period)
		left = x86_pmu.max_period;

	if (x86_pmu.limit_period)
		left = x86_pmu.limit_period(event, left);

	per_cpu(pmc_prev_left[idx], smp_processor_id()) = left;

	if (!(hwc->flags & PERF_X86_EVENT_AUTO_RELOAD) ||
	    local64_read(&hwc->prev_count) != (u64)-left) {
		/*
		 * The hw event starts counting from this event offset,
		 * mark it to be able to extract future deltas:
		 */
		local64_set(&hwc->prev_count, (u64)-left);

		wrmsrl(hwc->event_base, (u64)(-left) & x86_pmu.cntval_mask);
	}

	/*
	 * Due to erratum on certain CPUs we need
	 * a second write to be sure the register
	 * is updated properly
	 */
	if (x86_pmu.perfctr_second_write) {
		wrmsrl(hwc->event_base,
			(u64)(-left) & x86_pmu.cntval_mask);
	}

	perf_event_update_userpage(event);

	return ret;
}

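/* Enable the event's counter, unless the PMU as a whole is disabled. */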
void x86_pmu_enable_event(struct perf_event *event)
{
	if (__this_cpu_read(cpu_hw_events.enabled))
		__x86_pmu_enable_event(&event->hw,
				       ARCH_PERFMON_EVENTSEL_ENABLE);
}

/*
 * Add a single event to the PMU.
 *
 * The event is added to the group of enabled events
 * but only if it can be scheduled with existing events.
 */
static int x86_pmu_add(struct perf_event *event, int flags)
{
	struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
	struct hw_perf_event *hwc;
	int assign[X86_PMC_IDX_MAX];
	int n, n0, ret;

	hwc = &event->hw;

	n0 = cpuc->n_events;
	ret = n = collect_events(cpuc, event, false);
	if (ret < 0)
		goto out;

	hwc->state = PERF_HES_UPTODATE | PERF_HES_STOPPED;
	if (!(flags & PERF_EF_START))
		hwc->state |= PERF_HES_ARCH;

	/*
	 * If group events scheduling transaction was started,
	 * skip the schedulability test here, it will be performed
	 * at commit time (->commit_txn) as a whole.
	 *
	 * If commit fails, we'll call ->del() on all events
	 * for which ->add() was called.
	 */
	if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
		goto done_collect;

	ret = x86_pmu.schedule_events(cpuc, n, assign);
	if (ret)
		goto out;
	/*
	 * copy new assignment; now we know it is possible and it
	 * will be used by hw_perf_enable()
	 */
	memcpy(cpuc->assign, assign, n*sizeof(int));

done_collect:
	/*
	 * Commit the collect_events() state. See x86_pmu_del() and
	 * x86_pmu_*_txn().
	 */
	cpuc->n_events = n;
	cpuc->n_added += n - n0;
	cpuc->n_txn += n - n0;

	if (x86_pmu.add) {
		/*
		 * This is before x86_pmu_enable() will call x86_pmu_start(),
		 * so we enable LBRs before an event needs them etc..
		 */
		x86_pmu.add(event);
	}

	ret = 0;
out:
	return ret;
}

Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001247static void x86_pmu_start(struct perf_event *event, int flags)
Stephane Eraniand76a0812010-02-08 17:06:01 +02001248{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001249 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001250 int idx = event->hw.idx;
1251
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001252 if (WARN_ON_ONCE(!(event->hw.state & PERF_HES_STOPPED)))
1253 return;
Stephane Eraniand76a0812010-02-08 17:06:01 +02001254
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001255 if (WARN_ON_ONCE(idx == -1))
1256 return;
1257
1258 if (flags & PERF_EF_RELOAD) {
1259 WARN_ON_ONCE(!(event->hw.state & PERF_HES_UPTODATE));
1260 x86_perf_event_set_period(event);
1261 }
1262
1263 event->hw.state = 0;
1264
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001265 cpuc->events[idx] = event;
1266 __set_bit(idx, cpuc->active_mask);
Robert Richter63e6be62010-09-15 18:20:34 +02001267 __set_bit(idx, cpuc->running);
Peter Zijlstraaff3d912010-03-02 20:32:08 +01001268 x86_pmu.enable(event);
Peter Zijlstrac08053e2010-03-06 13:19:24 +01001269 perf_event_update_userpage(event);
Peter Zijlstraa78ac322009-05-25 17:39:05 +02001270}
1271
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001272void perf_event_print_debug(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001273{
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001274 u64 ctrl, status, overflow, pmc_ctrl, pmc_count, prev_left, fixed;
Andi Kleenda3e6062015-02-27 09:48:31 -08001275 u64 pebs, debugctl;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001276 struct cpu_hw_events *cpuc;
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001277 unsigned long flags;
Ingo Molnar1e125672008-12-09 12:18:18 +01001278 int cpu, idx;
1279
Robert Richter948b1bb2010-03-29 18:36:50 +02001280 if (!x86_pmu.num_counters)
Ingo Molnar1e125672008-12-09 12:18:18 +01001281 return;
Ingo Molnar241771e2008-12-03 10:39:53 +01001282
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001283 local_irq_save(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001284
1285 cpu = smp_processor_id();
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001286 cpuc = &per_cpu(cpu_hw_events, cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001287
Robert Richterfaa28ae2009-04-29 12:47:13 +02001288 if (x86_pmu.version >= 2) {
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301289 rdmsrl(MSR_CORE_PERF_GLOBAL_CTRL, ctrl);
1290 rdmsrl(MSR_CORE_PERF_GLOBAL_STATUS, status);
1291 rdmsrl(MSR_CORE_PERF_GLOBAL_OVF_CTRL, overflow);
1292 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, fixed);
Ingo Molnar241771e2008-12-03 10:39:53 +01001293
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301294 pr_info("\n");
1295 pr_info("CPU#%d: ctrl: %016llx\n", cpu, ctrl);
1296 pr_info("CPU#%d: status: %016llx\n", cpu, status);
1297 pr_info("CPU#%d: overflow: %016llx\n", cpu, overflow);
1298 pr_info("CPU#%d: fixed: %016llx\n", cpu, fixed);
Andi Kleen15fde112015-02-27 09:48:32 -08001299 if (x86_pmu.pebs_constraints) {
1300 rdmsrl(MSR_IA32_PEBS_ENABLE, pebs);
1301 pr_info("CPU#%d: pebs: %016llx\n", cpu, pebs);
1302 }
Andi Kleenda3e6062015-02-27 09:48:31 -08001303 if (x86_pmu.lbr_nr) {
1304 rdmsrl(MSR_IA32_DEBUGCTLMSR, debugctl);
1305 pr_info("CPU#%d: debugctl: %016llx\n", cpu, debugctl);
1306 }
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301307 }
Peter Zijlstra7645a242010-03-08 13:51:31 +01001308 pr_info("CPU#%d: active: %016llx\n", cpu, *(u64 *)cpuc->active_mask);
Ingo Molnar241771e2008-12-03 10:39:53 +01001309
Robert Richter948b1bb2010-03-29 18:36:50 +02001310 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Robert Richter41bf4982011-02-02 17:40:57 +01001311 rdmsrl(x86_pmu_config_addr(idx), pmc_ctrl);
1312 rdmsrl(x86_pmu_event_addr(idx), pmc_count);
Ingo Molnar241771e2008-12-03 10:39:53 +01001313
Tejun Heo245b2e72009-06-24 15:13:48 +09001314 prev_left = per_cpu(pmc_prev_left[idx], cpu);
Ingo Molnar241771e2008-12-03 10:39:53 +01001315
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301316 pr_info("CPU#%d: gen-PMC%d ctrl: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001317 cpu, idx, pmc_ctrl);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301318 pr_info("CPU#%d: gen-PMC%d count: %016llx\n",
Ingo Molnar241771e2008-12-03 10:39:53 +01001319 cpu, idx, pmc_count);
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301320 pr_info("CPU#%d: gen-PMC%d left: %016llx\n",
Ingo Molnaree060942008-12-13 09:00:03 +01001321 cpu, idx, prev_left);
Ingo Molnar241771e2008-12-03 10:39:53 +01001322 }
Robert Richter948b1bb2010-03-29 18:36:50 +02001323 for (idx = 0; idx < x86_pmu.num_counters_fixed; idx++) {
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001324 rdmsrl(MSR_ARCH_PERFMON_FIXED_CTR0 + idx, pmc_count);
1325
Jaswinder Singh Rajputa1ef58f2009-02-28 18:45:39 +05301326 pr_info("CPU#%d: fixed-PMC%d count: %016llx\n",
Ingo Molnar2f18d1e2008-12-22 11:10:42 +01001327 cpu, idx, pmc_count);
1328 }
Peter Zijlstra5bb9efe2009-05-13 08:12:51 +02001329 local_irq_restore(flags);
Ingo Molnar241771e2008-12-03 10:39:53 +01001330}
1331
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001332void x86_pmu_stop(struct perf_event *event, int flags)
Ingo Molnar241771e2008-12-03 10:39:53 +01001333{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001334 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001335 struct hw_perf_event *hwc = &event->hw;
Ingo Molnar241771e2008-12-03 10:39:53 +01001336
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001337 if (__test_and_clear_bit(hwc->idx, cpuc->active_mask)) {
1338 x86_pmu.disable(event);
1339 cpuc->events[hwc->idx] = NULL;
1340 WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
1341 hwc->state |= PERF_HES_STOPPED;
1342 }
Peter Zijlstra71e2d282010-03-08 17:51:33 +01001343
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001344 if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
1345 /*
1346		 * Drain the remaining delta count out of an event
1347 * that we are disabling:
1348 */
1349 x86_perf_event_update(event);
1350 hwc->state |= PERF_HES_UPTODATE;
1351 }
Peter Zijlstra2e841872010-01-25 15:58:43 +01001352}
1353
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001354static void x86_pmu_del(struct perf_event *event, int flags)
Peter Zijlstra2e841872010-01-25 15:58:43 +01001355{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001356 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Peter Zijlstra2e841872010-01-25 15:58:43 +01001357 int i;
1358
Stephane Eranian90151c352010-05-25 16:23:10 +02001359 /*
Stephane Eranian2f7f73a2013-06-20 18:42:54 +02001360 * event is descheduled
1361 */
1362 event->hw.flags &= ~PERF_X86_EVENT_COMMITTED;
1363
1364 /*
Peter Zijlstra68f70822016-07-06 18:02:43 +02001365 * If we're called during a txn, we only need to undo x86_pmu.add.
Stephane Eranian90151c352010-05-25 16:23:10 +02001366 * The events never got scheduled and ->cancel_txn will truncate
1367 * the event_list.
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001368 *
1369 * XXX assumes any ->del() called during a TXN will only be on
1370 * an event added during that same TXN.
Stephane Eranian90151c352010-05-25 16:23:10 +02001371 */
Sukadev Bhattiprolu8f3e5682015-09-03 20:07:53 -07001372 if (cpuc->txn_flags & PERF_PMU_TXN_ADD)
Peter Zijlstra68f70822016-07-06 18:02:43 +02001373 goto do_del;
Stephane Eranian90151c352010-05-25 16:23:10 +02001374
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001375 /*
1376 * Not a TXN, therefore cleanup properly.
1377 */
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001378 x86_pmu_stop(event, PERF_EF_UPDATE);
Peter Zijlstra194002b2009-06-22 16:35:24 +02001379
Stephane Eranian1da53e02010-01-18 10:58:01 +02001380 for (i = 0; i < cpuc->n_events; i++) {
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001381 if (event == cpuc->event_list[i])
Peter Zijlstra6c9687a2010-01-25 11:57:25 +01001382 break;
Stephane Eranian1da53e02010-01-18 10:58:01 +02001383 }
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001384
1385 if (WARN_ON_ONCE(i == cpuc->n_events)) /* called ->del() without ->add() ? */
1386 return;
1387
1388	/* If we have a newly added event, make sure to decrease n_added. */
1389 if (i >= cpuc->n_events - cpuc->n_added)
1390 --cpuc->n_added;
1391
1392 if (x86_pmu.put_event_constraints)
1393 x86_pmu.put_event_constraints(cpuc, event);
1394
1395 /* Delete the array entry. */
Peter Zijlstrab371b592015-05-21 10:57:13 +02001396 while (++i < cpuc->n_events) {
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001397 cpuc->event_list[i-1] = cpuc->event_list[i];
Peter Zijlstrab371b592015-05-21 10:57:13 +02001398 cpuc->event_constraint[i-1] = cpuc->event_constraint[i];
1399 }
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001400 --cpuc->n_events;
1401
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001402 perf_event_update_userpage(event);
Peter Zijlstra68f70822016-07-06 18:02:43 +02001403
1404do_del:
1405 if (x86_pmu.del) {
1406 /*
1407		 * This runs after x86_pmu_stop(), so we disable LBRs after the
1408		 * last event that could need them, etc.
1409 */
1410 x86_pmu.del(event);
1411 }
Ingo Molnar241771e2008-12-03 10:39:53 +01001412}
1413
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001414int x86_pmu_handle_irq(struct pt_regs *regs)
Robert Richtera29aa8a2009-04-29 12:47:21 +02001415{
Peter Zijlstradf1a1322009-06-10 21:02:22 +02001416 struct perf_sample_data data;
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001417 struct cpu_hw_events *cpuc;
1418 struct perf_event *event;
Vince Weaver11d15782009-07-08 17:46:14 -04001419 int idx, handled = 0;
Ingo Molnar9029a5e2009-05-15 08:26:20 +02001420 u64 val;
1421
Christoph Lameter89cbc762014-08-17 12:30:40 -05001422 cpuc = this_cpu_ptr(&cpu_hw_events);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001423
Don Zickus2bce5da2011-04-27 06:32:33 -04001424 /*
1425 * Some chipsets need to unmask the LVTPC in a particular spot
1426 * inside the nmi handler. As a result, the unmasking was pushed
1427 * into all the nmi handlers.
1428 *
1429	 * This generic handler doesn't seem to be sensitive to where the
1430	 * unmasking occurs, so it was left at the top.
1431 */
1432 apic_write(APIC_LVTPC, APIC_DM_NMI);
1433
Robert Richter948b1bb2010-03-29 18:36:50 +02001434 for (idx = 0; idx < x86_pmu.num_counters; idx++) {
Robert Richter63e6be62010-09-15 18:20:34 +02001435 if (!test_bit(idx, cpuc->active_mask)) {
1436 /*
1437			 * Though we deactivated the counter, some CPUs
1438			 * might still deliver spurious interrupts that
1439			 * were already in flight. Catch them:
1440 */
1441 if (__test_and_clear_bit(idx, cpuc->running))
1442 handled++;
Robert Richtera29aa8a2009-04-29 12:47:21 +02001443 continue;
Robert Richter63e6be62010-09-15 18:20:34 +02001444 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001445
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001446 event = cpuc->events[idx];
Peter Zijlstraa4016a72009-05-14 14:52:17 +02001447
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001448 val = x86_perf_event_update(event);
Robert Richter948b1bb2010-03-29 18:36:50 +02001449 if (val & (1ULL << (x86_pmu.cntval_bits - 1)))
Peter Zijlstra48e22d52009-05-25 17:39:04 +02001450 continue;
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001451
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001452 /*
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001453 * event overflow
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001454 */
Robert Richter4177c422010-09-02 15:07:48 -04001455 handled++;
Robert Richterfd0d0002012-04-02 20:19:08 +02001456 perf_sample_data_init(&data, 0, event->hw.last_period);
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001457
Peter Zijlstra07088ed2010-03-02 20:16:01 +01001458 if (!x86_perf_event_set_period(event))
Peter Zijlstrae4abb5d2009-06-02 16:08:20 +02001459 continue;
1460
Peter Zijlstraa8b0ca12011-06-27 14:41:57 +02001461 if (perf_event_overflow(event, &data, regs))
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02001462 x86_pmu_stop(event, 0);
Robert Richtera29aa8a2009-04-29 12:47:21 +02001463 }
Peter Zijlstra962bf7a2009-05-13 13:21:36 +02001464
Peter Zijlstra9e350de2009-06-10 21:34:59 +02001465 if (handled)
1466 inc_irq_stat(apic_perf_irqs);
1467
Robert Richtera29aa8a2009-04-29 12:47:21 +02001468 return handled;
1469}
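
/*
 * A stand-alone model of the overflow test above (an illustrative
 * sketch, not code from this file): counters are programmed to -period,
 * sign-extended to cntval_bits, and count upwards. Until the counter
 * crosses zero, the top bit of the cntval_bits-wide value stays set, so
 * a clear top bit means the counter wrapped and the PMI is ours.
 */
static inline bool counter_wrapped(u64 count, int cntval_bits)
{
	/* top bit clear <=> the counter crossed zero since its reload */
	return !(count & (1ULL << (cntval_bits - 1)));
}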
Robert Richter39d81ea2009-04-29 12:47:05 +02001470
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001471void perf_events_lapic_init(void)
Ingo Molnar241771e2008-12-03 10:39:53 +01001472{
Ingo Molnar04da8a42009-08-11 10:40:08 +02001473 if (!x86_pmu.apic || !x86_pmu_initialized())
Ingo Molnar241771e2008-12-03 10:39:53 +01001474 return;
Robert Richter85cf9db2009-04-29 12:47:20 +02001475
Ingo Molnar241771e2008-12-03 10:39:53 +01001476 /*
Yong Wangc323d952009-05-29 13:28:35 +08001477 * Always use NMI for PMU
Ingo Molnar241771e2008-12-03 10:39:53 +01001478 */
Yong Wangc323d952009-05-29 13:28:35 +08001479 apic_write(APIC_LVTPC, APIC_DM_NMI);
Ingo Molnar241771e2008-12-03 10:39:53 +01001480}
1481
Masami Hiramatsu93266382014-04-17 17:18:14 +09001482static int
Don Zickus9c48f1c2011-09-30 15:06:21 -04001483perf_event_nmi_handler(unsigned int cmd, struct pt_regs *regs)
Ingo Molnar241771e2008-12-03 10:39:53 +01001484{
Dave Hansen14c63f12013-06-21 08:51:36 -07001485 u64 start_clock;
1486 u64 finish_clock;
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001487 int ret;
Dave Hansen14c63f12013-06-21 08:51:36 -07001488
Alexander Shishkin1b7b9382015-06-09 13:03:26 +03001489 /*
1490 * All PMUs/events that share this PMI handler should make sure to
1491 * increment active_events for their events.
1492 */
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001493 if (!atomic_read(&active_events))
Don Zickus9c48f1c2011-09-30 15:06:21 -04001494 return NMI_DONE;
Peter Zijlstra63a809a2009-05-01 12:23:17 +02001495
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001496 start_clock = sched_clock();
Dave Hansen14c63f12013-06-21 08:51:36 -07001497 ret = x86_pmu.handle_irq(regs);
Peter Zijlstrae8a923c2013-10-17 15:32:10 +02001498 finish_clock = sched_clock();
Dave Hansen14c63f12013-06-21 08:51:36 -07001499
1500 perf_sample_event_took(finish_clock - start_clock);
1501
1502 return ret;
Ingo Molnar241771e2008-12-03 10:39:53 +01001503}
Masami Hiramatsu93266382014-04-17 17:18:14 +09001504NOKPROBE_SYMBOL(perf_event_nmi_handler);
Ingo Molnar241771e2008-12-03 10:39:53 +01001505
Kevin Winchesterde0428a2011-08-30 20:41:05 -03001506struct event_constraint emptyconstraint;
1507struct event_constraint unconstrained;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301508
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001509static int x86_pmu_prepare_cpu(unsigned int cpu)
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001510{
Peter Zijlstra7fdba1c2011-07-22 13:41:54 +02001511 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001512 int i;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001513
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001514 for (i = 0 ; i < X86_PERF_KFREE_MAX; i++)
1515 cpuc->kfree_on_online[i] = NULL;
1516 if (x86_pmu.cpu_prepare)
1517 return x86_pmu.cpu_prepare(cpu);
1518 return 0;
1519}
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001520
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001521static int x86_pmu_dead_cpu(unsigned int cpu)
1522{
1523 if (x86_pmu.cpu_dead)
1524 x86_pmu.cpu_dead(cpu);
1525 return 0;
1526}
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001527
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001528static int x86_pmu_online_cpu(unsigned int cpu)
1529{
1530 struct cpu_hw_events *cpuc = &per_cpu(cpu_hw_events, cpu);
1531 int i;
Peter Zijlstra7fdba1c2011-07-22 13:41:54 +02001532
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001533 for (i = 0 ; i < X86_PERF_KFREE_MAX; i++) {
1534 kfree(cpuc->kfree_on_online[i]);
1535 cpuc->kfree_on_online[i] = NULL;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001536 }
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001537 return 0;
1538}
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001539
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001540static int x86_pmu_starting_cpu(unsigned int cpu)
1541{
1542 if (x86_pmu.cpu_starting)
1543 x86_pmu.cpu_starting(cpu);
1544 return 0;
1545}
1546
1547static int x86_pmu_dying_cpu(unsigned int cpu)
1548{
1549 if (x86_pmu.cpu_dying)
1550 x86_pmu.cpu_dying(cpu);
1551 return 0;
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001552}
1553
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001554static void __init pmu_check_apic(void)
1555{
Borislav Petkov93984fb2016-04-04 22:25:00 +02001556 if (boot_cpu_has(X86_FEATURE_APIC))
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001557 return;
1558
1559 x86_pmu.apic = 0;
1560 pr_info("no APIC, boot with the \"lapic\" boot parameter to force-enable it.\n");
1561 pr_info("no hardware sampling interrupt available.\n");
Vince Weaverc184c982014-05-16 17:18:07 -04001562
1563 /*
1564 * If we have a PMU initialized but no APIC
1565 * interrupts, we cannot sample hardware
1566 * events (user-space has to fall back and
1567	 * sample via an hrtimer-based software event):
1568 */
1569 pmu.capabilities |= PERF_PMU_CAP_NO_INTERRUPT;
1570
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001571}
1572
Jiri Olsa641cc932012-03-15 20:09:14 +01001573static struct attribute_group x86_pmu_format_group = {
1574 .name = "format",
1575 .attrs = NULL,
1576};
1577
Jiri Olsa8300daa2012-10-10 14:53:12 +02001578/*
1579 * Remove all undefined events (x86_pmu.event_map(id) == 0)
1580 * out of events_attr attributes.
1581 */
1582static void __init filter_events(struct attribute **attrs)
1583{
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001584 struct device_attribute *d;
1585 struct perf_pmu_events_attr *pmu_attr;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001586 int offset = 0;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001587 int i, j;
1588
1589 for (i = 0; attrs[i]; i++) {
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001590 d = (struct device_attribute *)attrs[i];
1591 pmu_attr = container_of(d, struct perf_pmu_events_attr, attr);
1592 /* str trumps id */
1593 if (pmu_attr->event_str)
1594 continue;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001595 if (x86_pmu.event_map(i + offset))
Jiri Olsa8300daa2012-10-10 14:53:12 +02001596 continue;
1597
1598 for (j = i; attrs[j]; j++)
1599 attrs[j] = attrs[j + 1];
1600
1601 /* Check the shifted attr. */
1602 i--;
Stephane Eranian61b87ca2015-12-07 20:33:25 +01001603
1604 /*
1605		 * event_map() is index based, while the attrs array is organized
1606		 * by increasing event index. If we shift the events, then
1607		 * we need to compensate for the event_map(), otherwise
1608		 * we are looking up the wrong event in the map.
1609 */
1610 offset++;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001611 }
1612}
1613
Andi Kleen1a6461b2013-01-24 16:10:25 +01001614/* Merge two pointer arrays */
Andi Kleen47732d82015-06-29 14:22:13 -07001615__init struct attribute **merge_attr(struct attribute **a, struct attribute **b)
Andi Kleen1a6461b2013-01-24 16:10:25 +01001616{
1617 struct attribute **new;
1618 int j, i;
1619
1620 for (j = 0; a[j]; j++)
1621 ;
1622 for (i = 0; b[i]; i++)
1623 j++;
1624 j++;
1625
1626 new = kmalloc(sizeof(struct attribute *) * j, GFP_KERNEL);
1627 if (!new)
1628 return NULL;
1629
1630 j = 0;
1631 for (i = 0; a[i]; i++)
1632 new[j++] = a[i];
1633 for (i = 0; b[i]; i++)
1634 new[j++] = b[i];
1635 new[j] = NULL;
1636
1637 return new;
1638}
1639
Huang Ruic7ab62b2016-03-09 13:45:06 +08001640ssize_t events_sysfs_show(struct device *dev, struct device_attribute *attr, char *page)
Jiri Olsaa4747392012-10-10 14:53:11 +02001641{
1642 struct perf_pmu_events_attr *pmu_attr = \
1643 container_of(attr, struct perf_pmu_events_attr, attr);
Jiri Olsaa4747392012-10-10 14:53:11 +02001644 u64 config = x86_pmu.event_map(pmu_attr->id);
Stephane Eranian3a54aaa2013-01-24 16:10:26 +01001645
1646 /* string trumps id */
1647 if (pmu_attr->event_str)
1648 return sprintf(page, "%s", pmu_attr->event_str);
1649
Jiri Olsaa4747392012-10-10 14:53:11 +02001650 return x86_pmu.events_sysfs_show(page, config);
1651}
Huang Ruic7ab62b2016-03-09 13:45:06 +08001652EXPORT_SYMBOL_GPL(events_sysfs_show);
Jiri Olsaa4747392012-10-10 14:53:11 +02001653
Andi Kleenfc07e9f2016-05-19 17:09:56 -07001654ssize_t events_ht_sysfs_show(struct device *dev, struct device_attribute *attr,
1655 char *page)
1656{
1657 struct perf_pmu_events_ht_attr *pmu_attr =
1658 container_of(attr, struct perf_pmu_events_ht_attr, attr);
1659
1660 /*
1661 * Report conditional events depending on Hyper-Threading.
1662 *
1663 * This is overly conservative as usually the HT special
1664 * handling is not needed if the other CPU thread is idle.
1665 *
1666 * Note this does not (and cannot) handle the case when thread
1667 * siblings are invisible, for example with virtualization
1668 * if they are owned by some other guest. The user tool
1669 * has to re-read when a thread sibling gets onlined later.
1670 */
1671 return sprintf(page, "%s",
1672 topology_max_smt_threads() > 1 ?
1673 pmu_attr->event_str_ht :
1674 pmu_attr->event_str_noht);
1675}
1676
Jiri Olsaa4747392012-10-10 14:53:11 +02001677EVENT_ATTR(cpu-cycles, CPU_CYCLES );
1678EVENT_ATTR(instructions, INSTRUCTIONS );
1679EVENT_ATTR(cache-references, CACHE_REFERENCES );
1680EVENT_ATTR(cache-misses, CACHE_MISSES );
1681EVENT_ATTR(branch-instructions, BRANCH_INSTRUCTIONS );
1682EVENT_ATTR(branch-misses, BRANCH_MISSES );
1683EVENT_ATTR(bus-cycles, BUS_CYCLES );
1684EVENT_ATTR(stalled-cycles-frontend, STALLED_CYCLES_FRONTEND );
1685EVENT_ATTR(stalled-cycles-backend, STALLED_CYCLES_BACKEND );
1686EVENT_ATTR(ref-cycles, REF_CPU_CYCLES );
1687
1688static struct attribute *empty_attrs;
1689
Peter Huewe95d18aa2012-10-29 21:48:17 +01001690static struct attribute *events_attr[] = {
Jiri Olsaa4747392012-10-10 14:53:11 +02001691 EVENT_PTR(CPU_CYCLES),
1692 EVENT_PTR(INSTRUCTIONS),
1693 EVENT_PTR(CACHE_REFERENCES),
1694 EVENT_PTR(CACHE_MISSES),
1695 EVENT_PTR(BRANCH_INSTRUCTIONS),
1696 EVENT_PTR(BRANCH_MISSES),
1697 EVENT_PTR(BUS_CYCLES),
1698 EVENT_PTR(STALLED_CYCLES_FRONTEND),
1699 EVENT_PTR(STALLED_CYCLES_BACKEND),
1700 EVENT_PTR(REF_CPU_CYCLES),
1701 NULL,
1702};
1703
1704static struct attribute_group x86_pmu_events_group = {
1705 .name = "events",
1706 .attrs = events_attr,
1707};
1708
Jiri Olsa0bf79d42012-10-10 14:53:14 +02001709ssize_t x86_event_sysfs_show(char *page, u64 config, u64 event)
Jiri Olsa43c032f2012-10-10 14:53:13 +02001710{
Jiri Olsa43c032f2012-10-10 14:53:13 +02001711 u64 umask = (config & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;
1712 u64 cmask = (config & ARCH_PERFMON_EVENTSEL_CMASK) >> 24;
1713 bool edge = (config & ARCH_PERFMON_EVENTSEL_EDGE);
1714 bool pc = (config & ARCH_PERFMON_EVENTSEL_PIN_CONTROL);
1715 bool any = (config & ARCH_PERFMON_EVENTSEL_ANY);
1716 bool inv = (config & ARCH_PERFMON_EVENTSEL_INV);
1717 ssize_t ret;
1718
1719 /*
1720	 * We have a whole page to spend and only a little data
1721 * to write, so we can safely use sprintf.
1722 */
1723 ret = sprintf(page, "event=0x%02llx", event);
1724
1725 if (umask)
1726 ret += sprintf(page + ret, ",umask=0x%02llx", umask);
1727
1728 if (edge)
1729 ret += sprintf(page + ret, ",edge");
1730
1731 if (pc)
1732 ret += sprintf(page + ret, ",pc");
1733
1734 if (any)
1735 ret += sprintf(page + ret, ",any");
1736
1737 if (inv)
1738 ret += sprintf(page + ret, ",inv");
1739
1740 if (cmask)
1741 ret += sprintf(page + ret, ",cmask=0x%02llx", cmask);
1742
1743 ret += sprintf(page + ret, "\n");
1744
1745 return ret;
1746}
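
/*
 * Worked example (illustrative only): for the architectural
 * LLC-reference encoding, config = 0x4f2e and event = 0x2e, the code
 * above emits
 *
 *	event=0x2e,umask=0x4f
 *
 * while a plain cycles event (config = 0x3c) prints just "event=0x3c",
 * since only non-zero fields and set flag bits are appended.
 */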
1747
Yinghai Ludda99112011-01-21 15:30:01 -08001748static int __init init_hw_perf_events(void)
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301749{
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001750 struct x86_pmu_quirk *quirk;
Robert Richter72eae042009-04-29 12:47:10 +02001751 int err;
1752
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001753 pr_info("Performance Events: ");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001754
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301755 switch (boot_cpu_data.x86_vendor) {
1756 case X86_VENDOR_INTEL:
Robert Richter72eae042009-04-29 12:47:10 +02001757 err = intel_pmu_init();
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301758 break;
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301759 case X86_VENDOR_AMD:
Robert Richter72eae042009-04-29 12:47:10 +02001760 err = amd_pmu_init();
Jaswinder Singh Rajputf87ad352009-02-27 20:15:14 +05301761 break;
Robert Richter41389602009-04-29 12:47:00 +02001762 default:
Ingo Molnar8a3da6c72013-09-28 15:48:48 +02001763 err = -ENOTSUPP;
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301764 }
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001765 if (err != 0) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001766 pr_cont("no PMU driver, software events only.\n");
Peter Zijlstra004417a2010-11-25 18:38:29 +01001767 return 0;
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001768 }
Jaswinder Singh Rajputb56a3802009-02-27 18:09:09 +05301769
Cyrill Gorcunov12558032009-12-10 19:56:34 +03001770 pmu_check_apic();
1771
Don Zickus33c6d6a2010-11-22 16:55:23 -05001772 /* sanity check that the hardware exists or is emulated */
Peter Zijlstra44072042010-12-08 15:56:23 +01001773 if (!check_hw_exists())
Peter Zijlstra004417a2010-11-25 18:38:29 +01001774 return 0;
Don Zickus33c6d6a2010-11-22 16:55:23 -05001775
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001776 pr_cont("%s PMU driver.\n", x86_pmu.name);
Robert Richterfaa28ae2009-04-29 12:47:13 +02001777
Peter Zijlstrae97df762014-02-05 20:48:51 +01001778 x86_pmu.attr_rdpmc = 1; /* enable userspace RDPMC usage by default */
1779
Peter Zijlstrac1d6f422011-12-06 14:07:15 +01001780 for (quirk = x86_pmu.quirks; quirk; quirk = quirk->next)
1781 quirk->func();
Peter Zijlstra3c447802010-03-04 21:49:01 +01001782
Robert Richtera1eac7a2012-06-20 20:46:34 +02001783 if (!x86_pmu.intel_ctrl)
1784 x86_pmu.intel_ctrl = (1 << x86_pmu.num_counters) - 1;
Ingo Molnar862a1a52008-12-17 13:09:20 +01001785
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001786 perf_events_lapic_init();
Don Zickus9c48f1c2011-09-30 15:06:21 -04001787 register_nmi_handler(NMI_LOCAL, perf_event_nmi_handler, 0, "PMI");
Ingo Molnar1123e3a2009-05-29 11:25:09 +02001788
Peter Zijlstra63b14642010-01-22 16:32:17 +01001789 unconstrained = (struct event_constraint)
Robert Richter948b1bb2010-03-29 18:36:50 +02001790 __EVENT_CONSTRAINT(0, (1ULL << x86_pmu.num_counters) - 1,
Stephane Eranian9fac2cf2013-01-24 16:10:27 +01001791 0, x86_pmu.num_counters, 0, 0);
Peter Zijlstra63b14642010-01-22 16:32:17 +01001792
Jiri Olsa641cc932012-03-15 20:09:14 +01001793 x86_pmu_format_group.attrs = x86_pmu.format_attrs;
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01001794
Stephane Eranianf20093e2013-01-24 16:10:32 +01001795 if (x86_pmu.event_attrs)
1796 x86_pmu_events_group.attrs = x86_pmu.event_attrs;
1797
Jiri Olsaa4747392012-10-10 14:53:11 +02001798 if (!x86_pmu.events_sysfs_show)
1799 x86_pmu_events_group.attrs = &empty_attrs;
Jiri Olsa8300daa2012-10-10 14:53:12 +02001800 else
1801 filter_events(x86_pmu_events_group.attrs);
Jiri Olsaa4747392012-10-10 14:53:11 +02001802
Andi Kleen1a6461b2013-01-24 16:10:25 +01001803 if (x86_pmu.cpu_events) {
1804 struct attribute **tmp;
1805
1806 tmp = merge_attr(x86_pmu_events_group.attrs, x86_pmu.cpu_events);
1807 if (!WARN_ON(!tmp))
1808 x86_pmu_events_group.attrs = tmp;
1809 }
1810
Ingo Molnar57c0c152009-09-21 12:20:38 +02001811 pr_info("... version: %d\n", x86_pmu.version);
Robert Richter948b1bb2010-03-29 18:36:50 +02001812 pr_info("... bit width: %d\n", x86_pmu.cntval_bits);
1813 pr_info("... generic registers: %d\n", x86_pmu.num_counters);
1814 pr_info("... value mask: %016Lx\n", x86_pmu.cntval_mask);
Ingo Molnar57c0c152009-09-21 12:20:38 +02001815 pr_info("... max period: %016Lx\n", x86_pmu.max_period);
Robert Richter948b1bb2010-03-29 18:36:50 +02001816 pr_info("... fixed-purpose events: %d\n", x86_pmu.num_counters_fixed);
Robert Richterd6dc0b42010-03-17 12:49:13 +01001817 pr_info("... event mask: %016Lx\n", x86_pmu.intel_ctrl);
Peter Zijlstra3f6da392010-03-05 13:01:18 +01001818
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001819 /*
1820 * Install callbacks. Core will call them for each online
1821 * cpu.
1822 */
Thomas Gleixner73c1b412016-12-21 20:19:54 +01001823 err = cpuhp_setup_state(CPUHP_PERF_X86_PREPARE, "perf/x86:prepare",
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001824 x86_pmu_prepare_cpu, x86_pmu_dead_cpu);
1825 if (err)
1826 return err;
1827
1828 err = cpuhp_setup_state(CPUHP_AP_PERF_X86_STARTING,
Thomas Gleixner73c1b412016-12-21 20:19:54 +01001829 "perf/x86:starting", x86_pmu_starting_cpu,
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001830 x86_pmu_dying_cpu);
1831 if (err)
1832 goto out;
1833
Thomas Gleixner73c1b412016-12-21 20:19:54 +01001834 err = cpuhp_setup_state(CPUHP_AP_PERF_X86_ONLINE, "perf/x86:online",
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001835 x86_pmu_online_cpu, NULL);
1836 if (err)
1837 goto out1;
1838
1839 err = perf_pmu_register(&pmu, "cpu", PERF_TYPE_RAW);
1840 if (err)
1841 goto out2;
Peter Zijlstra004417a2010-11-25 18:38:29 +01001842
1843 return 0;
Thomas Gleixner95ca7922016-07-13 17:16:10 +00001844
1845out2:
1846 cpuhp_remove_state(CPUHP_AP_PERF_X86_ONLINE);
1847out1:
1848 cpuhp_remove_state(CPUHP_AP_PERF_X86_STARTING);
1849out:
1850 cpuhp_remove_state(CPUHP_PERF_X86_PREPARE);
1851 return err;
Ingo Molnar241771e2008-12-03 10:39:53 +01001852}
Peter Zijlstra004417a2010-11-25 18:38:29 +01001853early_initcall(init_hw_perf_events);
Ingo Molnar621a01e2008-12-11 12:46:46 +01001854
Ingo Molnarcdd6c482009-09-21 12:02:48 +02001855static inline void x86_pmu_read(struct perf_event *event)
Ingo Molnaree060942008-12-13 09:00:03 +01001856{
Peter Zijlstracc2ad4b2010-03-02 20:18:39 +01001857 x86_perf_event_update(event);
Ingo Molnaree060942008-12-13 09:00:03 +01001858}
1859
Lin Ming4d1c52b2010-04-23 13:56:12 +08001860/*
1861 * Start a group-event scheduling transaction.
1862 * Set the flag to make pmu::enable() not perform the
1863 * schedulability test; it will be performed at commit time.
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001864 *
1865 * We only support PERF_PMU_TXN_ADD transactions. Save the
1866 * transaction flags but otherwise ignore non-PERF_PMU_TXN_ADD
1867 * transactions.
Lin Ming4d1c52b2010-04-23 13:56:12 +08001868 */
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001869static void x86_pmu_start_txn(struct pmu *pmu, unsigned int txn_flags)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001870{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001871 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1872
1873 WARN_ON_ONCE(cpuc->txn_flags); /* txn already in flight */
1874
1875 cpuc->txn_flags = txn_flags;
1876 if (txn_flags & ~PERF_PMU_TXN_ADD)
1877 return;
1878
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001879 perf_pmu_disable(pmu);
Tejun Heo0a3aee02010-12-18 16:28:55 +01001880 __this_cpu_write(cpu_hw_events.n_txn, 0);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001881}
1882
1883/*
1884 * Stop a group-event scheduling transaction.
1885 * Clear the flag and pmu::enable() will perform the
1886 * schedulability test.
1887 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001888static void x86_pmu_cancel_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001889{
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001890 unsigned int txn_flags;
1891 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
1892
1893 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1894
1895 txn_flags = cpuc->txn_flags;
1896 cpuc->txn_flags = 0;
1897 if (txn_flags & ~PERF_PMU_TXN_ADD)
1898 return;
1899
Stephane Eranian90151c352010-05-25 16:23:10 +02001900 /*
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001901	 * Truncate the collected array by the number of events added in this
1902 * transaction. See x86_pmu_add() and x86_pmu_*_txn().
Stephane Eranian90151c352010-05-25 16:23:10 +02001903 */
Tejun Heo0a3aee02010-12-18 16:28:55 +01001904 __this_cpu_sub(cpu_hw_events.n_added, __this_cpu_read(cpu_hw_events.n_txn));
1905 __this_cpu_sub(cpu_hw_events.n_events, __this_cpu_read(cpu_hw_events.n_txn));
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001906 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001907}
1908
1909/*
1910 * Commit a group-event scheduling transaction.
1911 * Perform the group schedulability test as a whole.
1912 * Return 0 on success.
Peter Zijlstrac347a2f2014-02-24 12:26:21 +01001913 *
1914 * Does not cancel the transaction on failure; expects the caller to do this.
Lin Ming4d1c52b2010-04-23 13:56:12 +08001915 */
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02001916static int x86_pmu_commit_txn(struct pmu *pmu)
Lin Ming4d1c52b2010-04-23 13:56:12 +08001917{
Christoph Lameter89cbc762014-08-17 12:30:40 -05001918 struct cpu_hw_events *cpuc = this_cpu_ptr(&cpu_hw_events);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001919 int assign[X86_PMC_IDX_MAX];
1920 int n, ret;
1921
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001922 WARN_ON_ONCE(!cpuc->txn_flags); /* no txn in flight */
1923
1924 if (cpuc->txn_flags & ~PERF_PMU_TXN_ADD) {
1925 cpuc->txn_flags = 0;
1926 return 0;
1927 }
1928
Lin Ming4d1c52b2010-04-23 13:56:12 +08001929 n = cpuc->n_events;
1930
1931 if (!x86_pmu_initialized())
1932 return -EAGAIN;
1933
1934 ret = x86_pmu.schedule_events(cpuc, n, assign);
1935 if (ret)
1936 return ret;
1937
1938 /*
1939	 * Copy the new assignment; now that we know it is possible,
1940	 * it will be used by hw_perf_enable().
1941 */
1942 memcpy(cpuc->assign, assign, n*sizeof(int));
1943
Sukadev Bhattiprolufbbe0702015-09-03 20:07:45 -07001944 cpuc->txn_flags = 0;
Peter Zijlstra33696fc2010-06-14 08:49:00 +02001945 perf_pmu_enable(pmu);
Lin Ming4d1c52b2010-04-23 13:56:12 +08001946 return 0;
1947}
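
/*
 * Usage sketch (an assumption about the generic-core caller, not code
 * from this file): the transaction ops above pair up roughly as
 *
 *	pmu->start_txn(pmu, PERF_PMU_TXN_ADD);
 *	for each event in the group:
 *		if (pmu->add(event, PERF_EF_START))
 *			goto fail;
 *	if (!pmu->commit_txn(pmu))
 *		return 0;
 *  fail:
 *	pmu->cancel_txn(pmu);
 *
 * which is why x86_pmu_add() may defer the schedulability test to
 * x86_pmu_commit_txn(), and why x86_pmu_del() during a transaction only
 * needs to undo x86_pmu.add().
 */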
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001948/*
1949 * a fake_cpuc is used to validate event groups. Due to
1950 * the extra reg logic, we need to also allocate a fake
1951 * per_core and per_cpu structure. Otherwise, group events
1952 * using extra reg may conflict without the kernel being
1953 * able to catch this when the last event gets added to
1954 * the group.
1955 */
1956static void free_fake_cpuc(struct cpu_hw_events *cpuc)
1957{
1958 kfree(cpuc->shared_regs);
1959 kfree(cpuc);
1960}
1961
1962static struct cpu_hw_events *allocate_fake_cpuc(void)
1963{
1964 struct cpu_hw_events *cpuc;
1965 int cpu = raw_smp_processor_id();
1966
1967 cpuc = kzalloc(sizeof(*cpuc), GFP_KERNEL);
1968 if (!cpuc)
1969 return ERR_PTR(-ENOMEM);
1970
1971 /* only needed, if we have extra_regs */
1972 if (x86_pmu.extra_regs) {
1973 cpuc->shared_regs = allocate_shared_regs(cpu);
1974 if (!cpuc->shared_regs)
1975 goto error;
1976 }
Peter Zijlstrab430f7c2012-06-05 15:30:31 +02001977 cpuc->is_fake = 1;
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001978 return cpuc;
1979error:
1980 free_fake_cpuc(cpuc);
1981 return ERR_PTR(-ENOMEM);
1982}
Lin Ming4d1c52b2010-04-23 13:56:12 +08001983
Stephane Eranian1da53e02010-01-18 10:58:01 +02001984/*
Peter Zijlstraca037702010-03-02 19:52:12 +01001985 * validate that we can schedule this event
1986 */
1987static int validate_event(struct perf_event *event)
1988{
1989 struct cpu_hw_events *fake_cpuc;
1990 struct event_constraint *c;
1991 int ret = 0;
1992
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02001993 fake_cpuc = allocate_fake_cpuc();
1994 if (IS_ERR(fake_cpuc))
1995 return PTR_ERR(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01001996
Stephane Eranian79cba822014-11-17 20:06:56 +01001997 c = x86_pmu.get_event_constraints(fake_cpuc, -1, event);
Peter Zijlstraca037702010-03-02 19:52:12 +01001998
1999 if (!c || !c->weight)
Peter Zijlstraaa2bc1a2011-11-09 17:56:37 +01002000 ret = -EINVAL;
Peter Zijlstraca037702010-03-02 19:52:12 +01002001
2002 if (x86_pmu.put_event_constraints)
2003 x86_pmu.put_event_constraints(fake_cpuc, event);
2004
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002005 free_fake_cpuc(fake_cpuc);
Peter Zijlstraca037702010-03-02 19:52:12 +01002006
2007 return ret;
2008}
2009
2010/*
Stephane Eranian1da53e02010-01-18 10:58:01 +02002011 * validate a single event group
2012 *
2013 * validation includes:
Ingo Molnar184f4122010-01-27 08:39:39 +01002014 * - check events are compatible with each other
2015 * - events do not compete for the same counter
2016 * - number of events <= number of counters
Stephane Eranian1da53e02010-01-18 10:58:01 +02002017 *
2018 * validation ensures the group can be loaded onto the
2019 * PMU if it was the only group available.
2020 */
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002021static int validate_group(struct perf_event *event)
2022{
Stephane Eranian1da53e02010-01-18 10:58:01 +02002023 struct perf_event *leader = event->group_leader;
Peter Zijlstra502568d2010-01-22 14:35:46 +01002024 struct cpu_hw_events *fake_cpuc;
Peter Zijlstraaa2bc1a2011-11-09 17:56:37 +01002025 int ret = -EINVAL, n;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002026
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002027 fake_cpuc = allocate_fake_cpuc();
2028 if (IS_ERR(fake_cpuc))
2029 return PTR_ERR(fake_cpuc);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002030 /*
2031	 * the event is not yet connected with its
2032	 * siblings, therefore we must first collect
2033	 * existing siblings, then add the new event
2034	 * before we can simulate the scheduling.
2035 */
Peter Zijlstra502568d2010-01-22 14:35:46 +01002036 n = collect_events(fake_cpuc, leader, true);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002037 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002038 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002039
Peter Zijlstra502568d2010-01-22 14:35:46 +01002040 fake_cpuc->n_events = n;
2041 n = collect_events(fake_cpuc, event, false);
Stephane Eranian1da53e02010-01-18 10:58:01 +02002042 if (n < 0)
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002043 goto out;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002044
Peter Zijlstra502568d2010-01-22 14:35:46 +01002045 fake_cpuc->n_events = n;
Stephane Eranian1da53e02010-01-18 10:58:01 +02002046
Cyrill Gorcunova0727382010-03-11 19:54:39 +03002047 ret = x86_pmu.schedule_events(fake_cpuc, n, NULL);
Peter Zijlstra502568d2010-01-22 14:35:46 +01002048
Peter Zijlstra502568d2010-01-22 14:35:46 +01002049out:
Stephane Eraniancd8a38d2011-06-06 16:57:08 +02002050 free_fake_cpuc(fake_cpuc);
Peter Zijlstra502568d2010-01-22 14:35:46 +01002051 return ret;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002052}
2053
Yinghai Ludda99112011-01-21 15:30:01 -08002054static int x86_pmu_event_init(struct perf_event *event)
Ingo Molnar621a01e2008-12-11 12:46:46 +01002055{
Peter Zijlstra51b0fe32010-06-11 13:35:57 +02002056 struct pmu *tmp;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002057 int err;
2058
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002059 switch (event->attr.type) {
2060 case PERF_TYPE_RAW:
2061 case PERF_TYPE_HARDWARE:
2062 case PERF_TYPE_HW_CACHE:
2063 break;
2064
2065 default:
2066 return -ENOENT;
2067 }
2068
2069 err = __x86_pmu_event_init(event);
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002070 if (!err) {
Stephane Eranian81130702010-01-21 17:39:01 +02002071 /*
2072		 * we temporarily connect the event to its pmu
2073 * such that validate_group() can classify
2074 * it as an x86 event using is_x86_event()
2075 */
2076 tmp = event->pmu;
2077 event->pmu = &pmu;
2078
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002079 if (event->group_leader != event)
2080 err = validate_group(event);
Peter Zijlstraca037702010-03-02 19:52:12 +01002081 else
2082 err = validate_event(event);
Stephane Eranian81130702010-01-21 17:39:01 +02002083
2084 event->pmu = tmp;
Peter Zijlstrafe9081c2009-10-08 11:56:07 +02002085 }
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002086 if (err) {
Ingo Molnarcdd6c482009-09-21 12:02:48 +02002087 if (event->destroy)
2088 event->destroy(event);
Peter Zijlstraa1792cdac2009-09-09 10:04:47 +02002089 }
Ingo Molnar621a01e2008-12-11 12:46:46 +01002090
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002091 if (ACCESS_ONCE(x86_pmu.attr_rdpmc))
2092 event->hw.flags |= PERF_X86_EVENT_RDPMC_ALLOWED;
2093
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002094 return err;
Ingo Molnar621a01e2008-12-11 12:46:46 +01002095}
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002096
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002097static void refresh_pce(void *ignored)
2098{
2099 if (current->mm)
2100 load_mm_cr4(current->mm);
2101}
2102
2103static void x86_pmu_event_mapped(struct perf_event *event)
2104{
2105 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2106 return;
2107
2108 if (atomic_inc_return(&current->mm->context.perf_rdpmc_allowed) == 1)
2109 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2110}
2111
2112static void x86_pmu_event_unmapped(struct perf_event *event)
2113{
2114 if (!current->mm)
2115 return;
2116
2117 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
2118 return;
2119
2120 if (atomic_dec_and_test(&current->mm->context.perf_rdpmc_allowed))
2121 on_each_cpu_mask(mm_cpumask(current->mm), refresh_pce, NULL, 1);
2122}
2123
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002124static int x86_pmu_event_idx(struct perf_event *event)
2125{
2126 int idx = event->hw.idx;
2127
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002128 if (!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED))
Peter Zijlstrac7206202012-03-22 17:26:36 +01002129 return 0;
2130
Robert Richter15c7ad52012-06-20 20:46:33 +02002131 if (x86_pmu.num_counters_fixed && idx >= INTEL_PMC_IDX_FIXED) {
2132 idx -= INTEL_PMC_IDX_FIXED;
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002133 idx |= 1 << 30;
2134 }
2135
2136 return idx + 1;
2137}
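
/*
 * Userspace-side sketch (illustrative; it assumes the standard
 * perf_event_mmap_page protocol rather than quoting real tooling code).
 * The biased index returned above is published as userpg->index; a
 * reader undoes the +1 and feeds the result to RDPMC, whose ECX
 * encoding uses bit 30 to select the fixed-function counter bank:
 */
#if 0
u64 rdpmc_read(volatile struct perf_event_mmap_page *pc)
{
	u32 seq, idx, lo, hi;
	u64 count;

	do {
		seq = pc->lock;
		barrier();
		idx = pc->index;
		count = pc->offset;
		if (pc->cap_user_rdpmc && idx) {
			asm volatile("rdpmc" : "=a" (lo), "=d" (hi)
					     : "c" (idx - 1));
			count += (u64)hi << 32 | lo;
		}
		barrier();
	} while (pc->lock != seq);

	return count;
}
#endif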
2138
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002139static ssize_t get_attr_rdpmc(struct device *cdev,
2140 struct device_attribute *attr,
2141 char *buf)
2142{
2143 return snprintf(buf, 40, "%d\n", x86_pmu.attr_rdpmc);
2144}
2145
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002146static ssize_t set_attr_rdpmc(struct device *cdev,
2147 struct device_attribute *attr,
2148 const char *buf, size_t count)
2149{
Shuah Khane2b297f2012-06-10 21:13:41 -06002150 unsigned long val;
2151 ssize_t ret;
2152
2153 ret = kstrtoul(buf, 0, &val);
2154 if (ret)
2155 return ret;
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002156
Andy Lutomirskia6673422014-10-24 15:58:13 -07002157 if (val > 2)
2158 return -EINVAL;
2159
Peter Zijlstrae97df762014-02-05 20:48:51 +01002160 if (x86_pmu.attr_rdpmc_broken)
2161 return -ENOTSUPP;
2162
Andy Lutomirskia6673422014-10-24 15:58:13 -07002163 if ((val == 2) != (x86_pmu.attr_rdpmc == 2)) {
2164 /*
2165 * Changing into or out of always available, aka
2166 * perf-event-bypassing mode. This path is extremely slow,
2167 * but only root can trigger it, so it's okay.
2168 */
2169 if (val == 2)
2170 static_key_slow_inc(&rdpmc_always_available);
2171 else
2172 static_key_slow_dec(&rdpmc_always_available);
2173 on_each_cpu(refresh_pce, NULL, 1);
2174 }
2175
2176 x86_pmu.attr_rdpmc = val;
2177
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002178 return count;
2179}
2180
2181static DEVICE_ATTR(rdpmc, S_IRUSR | S_IWUSR, get_attr_rdpmc, set_attr_rdpmc);
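
/*
 * Usage note (illustrative): this attribute appears as
 * /sys/bus/event_source/devices/cpu/rdpmc. Writing 0 forbids RDPMC from
 * userspace, 1 (the default) allows it for tasks with an active mapped
 * perf event, and 2 keeps CR4.PCE set everywhere so RDPMC is always
 * available, as implemented in set_attr_rdpmc() above.
 */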
2182
2183static struct attribute *x86_pmu_attrs[] = {
2184 &dev_attr_rdpmc.attr,
2185 NULL,
2186};
2187
2188static struct attribute_group x86_pmu_attr_group = {
2189 .attrs = x86_pmu_attrs,
2190};
2191
2192static const struct attribute_group *x86_pmu_attr_groups[] = {
2193 &x86_pmu_attr_group,
Jiri Olsa641cc932012-03-15 20:09:14 +01002194 &x86_pmu_format_group,
Jiri Olsaa4747392012-10-10 14:53:11 +02002195 &x86_pmu_events_group,
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002196 NULL,
2197};
2198
Yan, Zhengba532502014-11-04 21:55:58 -05002199static void x86_pmu_sched_task(struct perf_event_context *ctx, bool sched_in)
Stephane Eraniand010b332012-02-09 23:21:00 +01002200{
Yan, Zhengba532502014-11-04 21:55:58 -05002201 if (x86_pmu.sched_task)
2202 x86_pmu.sched_task(ctx, sched_in);
Stephane Eraniand010b332012-02-09 23:21:00 +01002203}
2204
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002205void perf_check_microcode(void)
2206{
2207 if (x86_pmu.check_microcode)
2208 x86_pmu.check_microcode();
2209}
2210EXPORT_SYMBOL_GPL(perf_check_microcode);
2211
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002212static struct pmu pmu = {
Stephane Eraniand010b332012-02-09 23:21:00 +01002213 .pmu_enable = x86_pmu_enable,
2214 .pmu_disable = x86_pmu_disable,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002215
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002216 .attr_groups = x86_pmu_attr_groups,
Peter Zijlstra0c9d42e2011-11-20 23:30:47 +01002217
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002218 .event_init = x86_pmu_event_init,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002219
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002220 .event_mapped = x86_pmu_event_mapped,
2221 .event_unmapped = x86_pmu_event_unmapped,
2222
Stephane Eraniand010b332012-02-09 23:21:00 +01002223 .add = x86_pmu_add,
2224 .del = x86_pmu_del,
2225 .start = x86_pmu_start,
2226 .stop = x86_pmu_stop,
2227 .read = x86_pmu_read,
Peter Zijlstraa4eaf7f2010-06-16 14:37:10 +02002228
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002229 .start_txn = x86_pmu_start_txn,
2230 .cancel_txn = x86_pmu_cancel_txn,
2231 .commit_txn = x86_pmu_commit_txn,
Peter Zijlstrafe4a3302011-11-20 20:44:06 +01002232
Peter Zijlstrac93dc842012-06-08 14:50:50 +02002233 .event_idx = x86_pmu_event_idx,
Yan, Zhengba532502014-11-04 21:55:58 -05002234 .sched_task = x86_pmu_sched_task,
Yan, Zhenge18bf522014-11-04 21:56:03 -05002235 .task_ctx_size = sizeof(struct x86_perf_task_context),
Peter Zijlstrab0a873e2010-06-11 13:35:08 +02002236};
2237
Andy Lutomirskic1317ec2014-10-24 15:58:11 -07002238void arch_perf_update_userpage(struct perf_event *event,
2239 struct perf_event_mmap_page *userpg, u64 now)
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002240{
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002241 struct cyc2ns_data *data;
2242
Peter Zijlstrafa731582013-09-19 10:16:42 +02002243 userpg->cap_user_time = 0;
2244 userpg->cap_user_time_zero = 0;
Andy Lutomirski7911d3f2014-10-24 15:58:12 -07002245 userpg->cap_user_rdpmc =
2246 !!(event->hw.flags & PERF_X86_EVENT_RDPMC_ALLOWED);
Peter Zijlstrac7206202012-03-22 17:26:36 +01002247 userpg->pmc_width = x86_pmu.cntval_bits;
2248
Peter Zijlstra35af99e2013-11-28 19:38:42 +01002249 if (!sched_clock_stable())
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002250 return;
2251
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002252 data = cyc2ns_read_begin();
2253
Peter Zijlstra34f43922015-02-20 14:05:38 +01002254 /*
2255 * Internal timekeeping for enabled/running/stopped times
2256 * is always in the local_clock domain.
2257 */
Peter Zijlstrafa731582013-09-19 10:16:42 +02002258 userpg->cap_user_time = 1;
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002259 userpg->time_mult = data->cyc2ns_mul;
2260 userpg->time_shift = data->cyc2ns_shift;
2261 userpg->time_offset = data->cyc2ns_offset - now;
Adrian Hunterc73deb62013-06-28 16:22:18 +03002262
Peter Zijlstra34f43922015-02-20 14:05:38 +01002263 /*
2264 * cap_user_time_zero doesn't make sense when we're using a different
2265 * time base for the records.
2266 */
Alexander Shishkinf454bfd2016-04-14 14:59:49 +03002267 if (!event->attr.use_clockid) {
Peter Zijlstra34f43922015-02-20 14:05:38 +01002268 userpg->cap_user_time_zero = 1;
2269 userpg->time_zero = data->cyc2ns_offset;
2270 }
Peter Zijlstra20d1c862013-11-29 15:40:29 +01002271
2272 cyc2ns_read_end(data);
Peter Zijlstrae3f35412011-11-21 11:43:53 +01002273}
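
/*
 * Userspace-side sketch (illustrative, assuming the documented
 * perf_event_mmap_page protocol): the mult/shift/offset published above
 * let a reader scale a raw TSC value into the nanosecond domain, split
 * into quotient and remainder so the multiplication cannot overflow:
 */
#if 0
u64 tsc_to_ns(volatile struct perf_event_mmap_page *pc, u64 tsc)
{
	u64 quot = tsc >> pc->time_shift;
	u64 rem  = tsc & (((u64)1 << pc->time_shift) - 1);

	return pc->time_offset + quot * pc->time_mult +
	       ((rem * pc->time_mult) >> pc->time_shift);
}
#endif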
2274
Frederic Weisbecker56962b4442010-06-30 23:03:51 +02002275void
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002276perf_callchain_kernel(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002277{
Josh Poimboeuf35f4d9b2016-09-16 14:18:13 -05002278 struct unwind_state state;
2279 unsigned long addr;
2280
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002281 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2282 /* TODO: We don't support guest os callchain now */
Peter Zijlstraed805262010-08-20 14:30:41 +02002283 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002284 }
2285
Josh Poimboeuf019e5792016-08-24 11:50:14 -05002286 if (perf_callchain_store(entry, regs->ip))
2287 return;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002288
Josh Poimboeuf35f4d9b2016-09-16 14:18:13 -05002289 for (unwind_start(&state, current, regs, NULL); !unwind_done(&state);
2290 unwind_next_frame(&state)) {
2291 addr = unwind_get_return_address(&state);
2292 if (!addr || perf_callchain_store(entry, addr))
2293 return;
2294 }
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002295}
2296
Arun Sharmabc6ca7b2012-04-20 15:41:35 -07002297static inline int
2298valid_user_frame(const void __user *fp, unsigned long size)
2299{
2300 return (__range_not_ok(fp, size, TASK_SIZE) == 0);
2301}
2302
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002303static unsigned long get_segment_base(unsigned int segment)
2304{
2305 struct desc_struct *desc;
Thomas Gleixner990e9dc2016-12-10 00:13:51 +01002306 unsigned int idx = segment >> 3;
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002307
2308 if ((segment & SEGMENT_TI_MASK) == SEGMENT_LDT) {
Andy Lutomirskia5b9e5a2015-07-30 14:31:34 -07002309#ifdef CONFIG_MODIFY_LDT_SYSCALL
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002310 struct ldt_struct *ldt;
2311
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002312		if (idx >= LDT_ENTRIES)
2313 return 0;
2314
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002315 /* IRQs are off, so this synchronizes with smp_store_release */
2316 ldt = lockless_dereference(current->active_mm->context.ldt);
2317		if (!ldt || idx >= ldt->size)
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002318 return 0;
2319
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002320 desc = &ldt->entries[idx];
Andy Lutomirskia5b9e5a2015-07-30 14:31:34 -07002321#else
2322 return 0;
2323#endif
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002324 } else {
2325		if (idx >= GDT_ENTRIES)
2326 return 0;
2327
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002328 desc = raw_cpu_ptr(gdt_page.gdt) + idx;
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002329 }
2330
Andy Lutomirski37868fe2015-07-30 14:31:32 -07002331 return get_desc_base(desc);
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002332}
2333
Brian Gerst10ed3492015-06-22 07:55:17 -04002334#ifdef CONFIG_IA32_EMULATION
H. Peter Anvind1a797f2012-02-19 10:06:34 -08002335
2336#include <asm/compat.h>
2337
Torok Edwin257ef9d2010-03-17 12:07:16 +02002338static inline int
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002339perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002340{
Torok Edwin257ef9d2010-03-17 12:07:16 +02002341 /* 32-bit process in 64-bit kernel. */
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002342 unsigned long ss_base, cs_base;
Torok Edwin257ef9d2010-03-17 12:07:16 +02002343 struct stack_frame_ia32 frame;
2344 const void __user *fp;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002345
Torok Edwin257ef9d2010-03-17 12:07:16 +02002346 if (!test_thread_flag(TIF_IA32))
2347 return 0;
Peter Zijlstra74193ef2009-06-15 13:07:24 +02002348
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002349 cs_base = get_segment_base(regs->cs);
2350 ss_base = get_segment_base(regs->ss);
2351
2352 fp = compat_ptr(ss_base + regs->bp);
Andi Kleen75925e12015-10-22 15:07:21 -07002353 pagefault_disable();
Arnaldo Carvalho de Melo3b1fff02016-05-10 18:08:32 -03002354 while (entry->nr < entry->max_stack) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02002355 unsigned long bytes;
2356 frame.next_frame = 0;
2357 frame.return_address = 0;
2358
Johannes Weinerae31fe52016-11-22 10:57:42 +01002359 if (!valid_user_frame(fp, sizeof(frame)))
Andi Kleen75925e12015-10-22 15:07:21 -07002360 break;
2361
2362 bytes = __copy_from_user_nmi(&frame.next_frame, fp, 4);
2363 if (bytes != 0)
2364 break;
2365 bytes = __copy_from_user_nmi(&frame.return_address, fp+4, 4);
Peter Zijlstra0a196842013-10-30 21:16:22 +01002366 if (bytes != 0)
Torok Edwin257ef9d2010-03-17 12:07:16 +02002367 break;
2368
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002369 perf_callchain_store(entry, cs_base + frame.return_address);
2370 fp = compat_ptr(ss_base + frame.next_frame);
Torok Edwin257ef9d2010-03-17 12:07:16 +02002371 }
Andi Kleen75925e12015-10-22 15:07:21 -07002372 pagefault_enable();
Torok Edwin257ef9d2010-03-17 12:07:16 +02002373 return 1;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002374}
Torok Edwin257ef9d2010-03-17 12:07:16 +02002375#else
2376static inline int
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002377perf_callchain_user32(struct pt_regs *regs, struct perf_callchain_entry_ctx *entry)
Torok Edwin257ef9d2010-03-17 12:07:16 +02002378{
2379 return 0;
2380}
2381#endif
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002382
Frederic Weisbecker56962b4442010-06-30 23:03:51 +02002383void
Arnaldo Carvalho de Melocfbcf462016-04-28 12:30:53 -03002384perf_callchain_user(struct perf_callchain_entry_ctx *entry, struct pt_regs *regs)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002385{
2386 struct stack_frame frame;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002387 const unsigned long __user *fp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002388
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002389 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
2390 /* TODO: We don't support guest os callchain now */
Peter Zijlstraed805262010-08-20 14:30:41 +02002391 return;
Frederic Weisbecker927c7a92010-07-01 16:20:36 +02002392 }
Ingo Molnar5a6cec32009-05-29 11:25:09 +02002393
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002394 /*
2395	 * We don't know what to do with VM86 stacks; ignore them for now.
2396 */
2397 if (regs->flags & (X86_VM_MASK | PERF_EFLAGS_VM))
2398 return;
2399
Josh Poimboeuffc188222016-07-01 23:02:05 -05002400 fp = (unsigned long __user *)regs->bp;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002401
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02002402 perf_callchain_store(entry, regs->ip);
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002403
Andrey Vagin20afc602011-08-30 12:32:36 +04002404 if (!current->mm)
2405 return;
2406
Torok Edwin257ef9d2010-03-17 12:07:16 +02002407 if (perf_callchain_user32(regs, entry))
2408 return;
2409
Andi Kleen75925e12015-10-22 15:07:21 -07002410 pagefault_disable();
Arnaldo Carvalho de Melo3b1fff02016-05-10 18:08:32 -03002411 while (entry->nr < entry->max_stack) {
Torok Edwin257ef9d2010-03-17 12:07:16 +02002412 unsigned long bytes;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002413
Ingo Molnar038e8362009-06-15 09:57:59 +02002414 frame.next_frame = NULL;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002415 frame.return_address = 0;
2416
Johannes Weinerae31fe52016-11-22 10:57:42 +01002417 if (!valid_user_frame(fp, sizeof(frame)))
Andi Kleen75925e12015-10-22 15:07:21 -07002418 break;
2419
Josh Poimboeuffc188222016-07-01 23:02:05 -05002420 bytes = __copy_from_user_nmi(&frame.next_frame, fp, sizeof(*fp));
Andi Kleen75925e12015-10-22 15:07:21 -07002421 if (bytes != 0)
2422 break;
Josh Poimboeuffc188222016-07-01 23:02:05 -05002423 bytes = __copy_from_user_nmi(&frame.return_address, fp + 1, sizeof(*fp));
Peter Zijlstra0a196842013-10-30 21:16:22 +01002424 if (bytes != 0)
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002425 break;
2426
Frederic Weisbecker70791ce2010-06-29 19:34:05 +02002427 perf_callchain_store(entry, frame.return_address);
Andi Kleen75925e12015-10-22 15:07:21 -07002428 fp = (void __user *)frame.next_frame;
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002429 }
Andi Kleen75925e12015-10-22 15:07:21 -07002430 pagefault_enable();
Peter Zijlstrad7d59fb2009-03-30 19:07:15 +02002431}
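
/*
 * Note (illustrative): both user-stack walks above rely on the user
 * binary being built with frame pointers (e.g. -fno-omit-frame-pointer);
 * without them, frame.next_frame is garbage and the walk stops at the
 * first invalid or unreadable frame.
 */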
2432
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002433/*
2434 * Deal with code segment offsets for the various execution modes:
2435 *
2436 * VM86 - the good olde 16 bit days, where the linear address is
2437 * 20 bits and we use regs->ip + 0x10 * regs->cs.
2438 *
2439 * IA32 - Where we need to look at GDT/LDT segment descriptor tables
2440 * to figure out what the 32bit base address is.
2441 *
2442 * X32 - has TIF_X32 set, but is running in x86_64
2443 *
2444 * X86_64 - CS,DS,SS,ES are all zero based.
2445 */
2446static unsigned long code_segment_base(struct pt_regs *regs)
2447{
2448 /*
Andy Lutomirski383f3af2015-03-18 18:33:30 -07002449 * For IA32 we look at the GDT/LDT segment base to convert the
2450 * effective IP to a linear address.
2451 */
2452
2453#ifdef CONFIG_X86_32
2454 /*
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002455 * If we are in VM86 mode, add the segment offset to convert to a
2456 * linear address.
2457 */
2458 if (regs->flags & X86_VM_MASK)
2459 return 0x10 * regs->cs;
2460
Ingo Molnar55474c42015-03-29 11:02:34 +02002461 if (user_mode(regs) && regs->cs != __USER_CS)
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002462 return get_segment_base(regs->cs);
2463#else
Andy Lutomirskic56716a2015-03-18 18:33:28 -07002464 if (user_mode(regs) && !user_64bit_mode(regs) &&
2465 regs->cs != __USER32_CS)
2466 return get_segment_base(regs->cs);
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002467#endif
2468 return 0;
2469}
2470
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002471unsigned long perf_instruction_pointer(struct pt_regs *regs)
2472{
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002473 if (perf_guest_cbs && perf_guest_cbs->is_in_guest())
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002474 return perf_guest_cbs->get_guest_ip();
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002475
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002476 return regs->ip + code_segment_base(regs);
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002477}
2478
2479unsigned long perf_misc_flags(struct pt_regs *regs)
2480{
2481 int misc = 0;
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002482
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002483 if (perf_guest_cbs && perf_guest_cbs->is_in_guest()) {
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002484 if (perf_guest_cbs->is_user_mode())
2485 misc |= PERF_RECORD_MISC_GUEST_USER;
2486 else
2487 misc |= PERF_RECORD_MISC_GUEST_KERNEL;
2488 } else {
Peter Zijlstrad07bdfd2012-07-10 09:42:15 +02002489 if (user_mode(regs))
Zhang, Yanmindcf46b92010-04-20 10:13:58 +08002490 misc |= PERF_RECORD_MISC_USER;
2491 else
2492 misc |= PERF_RECORD_MISC_KERNEL;
2493 }
2494
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002495 if (regs->flags & PERF_EFLAGS_EXACT)
Peter Zijlstraab608342010-04-08 23:03:20 +02002496 misc |= PERF_RECORD_MISC_EXACT_IP;
Zhang, Yanmin39447b32010-04-19 13:32:41 +08002497
2498 return misc;
2499}
Gleb Natapovb3d94682011-11-10 14:57:27 +02002500
2501void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
2502{
2503 cap->version = x86_pmu.version;
2504 cap->num_counters_gp = x86_pmu.num_counters;
2505 cap->num_counters_fixed = x86_pmu.num_counters_fixed;
2506 cap->bit_width_gp = x86_pmu.cntval_bits;
2507 cap->bit_width_fixed = x86_pmu.cntval_bits;
2508 cap->events_mask = (unsigned int)x86_pmu.events_maskl;
2509 cap->events_mask_len = x86_pmu.events_mask_len;
2510}
2511EXPORT_SYMBOL_GPL(perf_get_x86_pmu_capability);