#ifndef _ASM_X86_PERF_EVENT_H
#define _ASM_X86_PERF_EVENT_H

/*
 * Performance event hw details:
 */

#define X86_PMC_MAX_GENERIC				       32
#define X86_PMC_MAX_FIXED					3

#define X86_PMC_IDX_GENERIC					0
#define X86_PMC_IDX_FIXED				       32
#define X86_PMC_IDX_MAX					       64

#define MSR_ARCH_PERFMON_PERFCTR0			      0xc1
#define MSR_ARCH_PERFMON_PERFCTR1			      0xc2

#define MSR_ARCH_PERFMON_EVENTSEL0			     0x186
#define MSR_ARCH_PERFMON_EVENTSEL1			     0x187

#define ARCH_PERFMON_EVENTSEL_EVENT			0x000000FFULL
#define ARCH_PERFMON_EVENTSEL_UMASK			0x0000FF00ULL
#define ARCH_PERFMON_EVENTSEL_USR			(1ULL << 16)
#define ARCH_PERFMON_EVENTSEL_OS			(1ULL << 17)
#define ARCH_PERFMON_EVENTSEL_EDGE			(1ULL << 18)
#define ARCH_PERFMON_EVENTSEL_INT			(1ULL << 20)
#define ARCH_PERFMON_EVENTSEL_ANY			(1ULL << 21)
#define ARCH_PERFMON_EVENTSEL_ENABLE			(1ULL << 22)
#define ARCH_PERFMON_EVENTSEL_INV			(1ULL << 23)
#define ARCH_PERFMON_EVENTSEL_CMASK			0xFF000000ULL
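
/*
 * Illustrative sketch (not part of this header's API): counting the
 * architectural "unhalted core cycles" event (0x3c) in both user and
 * kernel mode would compose an event select value roughly like this,
 * with wrmsrl() coming from <asm/msr.h>:
 *
 *	u64 config = 0x3c | ARCH_PERFMON_EVENTSEL_USR |
 *		     ARCH_PERFMON_EVENTSEL_OS | ARCH_PERFMON_EVENTSEL_ENABLE;
 *
 *	wrmsrl(MSR_ARCH_PERFMON_EVENTSEL0, config);
 */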

#define AMD_PERFMON_EVENTSEL_GUESTONLY			(1ULL << 40)
#define AMD_PERFMON_EVENTSEL_HOSTONLY			(1ULL << 41)

#define AMD64_EVENTSEL_EVENT	\
	(ARCH_PERFMON_EVENTSEL_EVENT | (0x0FULL << 32))
#define INTEL_ARCH_EVENT_MASK	\
	(ARCH_PERFMON_EVENTSEL_UMASK | ARCH_PERFMON_EVENTSEL_EVENT)

#define X86_RAW_EVENT_MASK		\
	(ARCH_PERFMON_EVENTSEL_EVENT |	\
	 ARCH_PERFMON_EVENTSEL_UMASK |	\
	 ARCH_PERFMON_EVENTSEL_EDGE  |	\
	 ARCH_PERFMON_EVENTSEL_INV   |	\
	 ARCH_PERFMON_EVENTSEL_CMASK)
#define AMD64_RAW_EVENT_MASK		\
	(X86_RAW_EVENT_MASK          |	\
	 AMD64_EVENTSEL_EVENT)
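
/*
 * Sketch of how such a mask is typically applied: bits of a user-supplied
 * raw config (perf_event_attr.config) that fall outside the mask are
 * stripped before the value ever reaches an event select MSR:
 *
 *	u64 config = attr->config & X86_RAW_EVENT_MASK;
 */
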
#define AMD64_NUM_COUNTERS				4
#define AMD64_NUM_COUNTERS_F15H				6
#define AMD64_NUM_COUNTERS_MAX				AMD64_NUM_COUNTERS_F15H

#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_SEL		0x3c
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_UMASK		(0x00 << 8)
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX		0
#define ARCH_PERFMON_UNHALTED_CORE_CYCLES_PRESENT \
		(1 << (ARCH_PERFMON_UNHALTED_CORE_CYCLES_INDEX))

#define ARCH_PERFMON_BRANCH_MISSES_RETIRED		6
#define ARCH_PERFMON_EVENTS_COUNT			7

/*
 * Intel "Architectural Performance Monitoring" CPUID
 * detection/enumeration details:
 */
union cpuid10_eax {
	struct {
		unsigned int version_id:8;
		unsigned int num_counters:8;
		unsigned int bit_width:8;
		unsigned int mask_length:8;
	} split;
	unsigned int full;
};

union cpuid10_ebx {
	struct {
		unsigned int no_unhalted_core_cycles:1;
		unsigned int no_instructions_retired:1;
		unsigned int no_unhalted_reference_cycles:1;
		unsigned int no_llc_reference:1;
		unsigned int no_llc_misses:1;
		unsigned int no_branch_instruction_retired:1;
		unsigned int no_branch_misses_retired:1;
	} split;
	unsigned int full;
};

union cpuid10_edx {
	struct {
		unsigned int num_counters_fixed:5;
		unsigned int bit_width_fixed:8;
		unsigned int reserved:19;
	} split;
	unsigned int full;
};
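
/*
 * Sketch of how these unions decode CPUID leaf 0xa, using cpuid() from
 * <asm/processor.h>. Note that EBX reports events that are *not*
 * available, so a clear bit means the event is supported:
 *
 *	union cpuid10_eax eax;
 *	union cpuid10_ebx ebx;
 *	union cpuid10_edx edx;
 *	unsigned int unused;
 *
 *	cpuid(0x0a, &eax.full, &ebx.full, &unused, &edx.full);
 *
 *	if (eax.split.mask_length > ARCH_PERFMON_BRANCH_MISSES_RETIRED &&
 *	    !ebx.split.no_branch_misses_retired)
 *		... branch-misses-retired can be counted ...
 */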

struct x86_pmu_capability {
	int		version;
	int		num_counters_gp;
	int		num_counters_fixed;
	int		bit_width_gp;
	int		bit_width_fixed;
	unsigned int	events_mask;
	int		events_mask_len;
};
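
/*
 * Sketch of a consumer (KVM-style) querying the host PMU via
 * perf_get_x86_pmu_capability(), declared further down in this header:
 *
 *	struct x86_pmu_capability cap;
 *
 *	perf_get_x86_pmu_capability(&cap);
 *	if (cap.version < 2)
 *		... no architectural perfmon v2, degrade gracefully ...
 */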

/*
 * Fixed-purpose performance events:
 */

/*
 * All 3 fixed-mode PMCs are configured via this single MSR:
 */
#define MSR_ARCH_PERFMON_FIXED_CTR_CTRL	0x38d

/*
 * The counts are available in three separate MSRs:
 */

/* Instr_Retired.Any: */
#define MSR_ARCH_PERFMON_FIXED_CTR0	0x309
#define X86_PMC_IDX_FIXED_INSTRUCTIONS	(X86_PMC_IDX_FIXED + 0)

/* CPU_CLK_Unhalted.Core: */
#define MSR_ARCH_PERFMON_FIXED_CTR1	0x30a
#define X86_PMC_IDX_FIXED_CPU_CYCLES	(X86_PMC_IDX_FIXED + 1)

/* CPU_CLK_Unhalted.Ref: */
#define MSR_ARCH_PERFMON_FIXED_CTR2	0x30b
#define X86_PMC_IDX_FIXED_REF_CYCLES	(X86_PMC_IDX_FIXED + 2)
#define X86_PMC_MSK_FIXED_REF_CYCLES	(1ULL << X86_PMC_IDX_FIXED_REF_CYCLES)
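
/*
 * Illustrative sketch: per the Intel SDM, each fixed counter owns a
 * 4-bit field in FIXED_CTR_CTRL (bit 0 of the field enables ring-0
 * counting, bit 1 ring-3 counting), so enabling Instr_Retired.Any for
 * both kernel and user mode would look roughly like:
 *
 *	wrmsrl(MSR_ARCH_PERFMON_FIXED_CTR_CTRL, 0x3ULL << 0);
 */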

/*
 * We model BTS tracing as another fixed-mode PMC.
 *
 * We choose a value in the middle of the fixed event range, since lower
 * values are used by actual fixed events and higher values are used
 * to indicate other overflow conditions in the PERF_GLOBAL_STATUS MSR.
 */
#define X86_PMC_IDX_FIXED_BTS	(X86_PMC_IDX_FIXED + 16)

/*
 * IBS cpuid feature detection
 */

#define IBS_CPUID_FEATURES		0x8000001b

/*
 * Same bit mask as for IBS cpuid feature flags (Fn8000_001B_EAX), but
 * bit 0 is used to indicate the existence of IBS.
 */
#define IBS_CAPS_AVAIL			(1U<<0)
#define IBS_CAPS_FETCHSAM		(1U<<1)
#define IBS_CAPS_OPSAM			(1U<<2)
#define IBS_CAPS_RDWROPCNT		(1U<<3)
#define IBS_CAPS_OPCNT			(1U<<4)
#define IBS_CAPS_BRNTRGT		(1U<<5)
#define IBS_CAPS_OPCNTEXT		(1U<<6)

#define IBS_CAPS_DEFAULT		(IBS_CAPS_AVAIL		\
					 | IBS_CAPS_FETCHSAM	\
					 | IBS_CAPS_OPSAM)

/*
 * IBS APIC setup
 */
#define IBSCTL				0x1cc
#define IBSCTL_LVT_OFFSET_VALID		(1ULL<<8)
#define IBSCTL_LVT_OFFSET_MASK		0x0F

/* IbsFetchCtl bits/masks */
#define IBS_FETCH_RAND_EN	(1ULL<<57)
#define IBS_FETCH_VAL		(1ULL<<49)
#define IBS_FETCH_ENABLE	(1ULL<<48)
#define IBS_FETCH_CNT		0xFFFF0000ULL
#define IBS_FETCH_MAX_CNT	0x0000FFFFULL

/* IbsOpCtl bits */
#define IBS_OP_CNT_CTL		(1ULL<<19)
#define IBS_OP_VAL		(1ULL<<18)
#define IBS_OP_ENABLE		(1ULL<<17)
#define IBS_OP_MAX_CNT		0x0000FFFFULL
#define IBS_OP_MAX_CNT_EXT	0x007FFFFFULL	/* not a register bit mask */
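
/*
 * Sketch of an NMI-handler-side check for a completed IBS fetch sample,
 * assuming rdmsrl() from <asm/msr.h> and MSR_AMD64_IBSFETCHCTL from
 * <asm/msr-index.h>:
 *
 *	u64 ctl;
 *
 *	rdmsrl(MSR_AMD64_IBSFETCHCTL, ctl);
 *	if (ctl & IBS_FETCH_VAL)
 *		... a sample is ready; IBS_FETCH_CNT masks its count bits ...
 */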

extern u32 get_ibs_caps(void);
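
/*
 * Sketch of a caller probing for IBS; get_ibs_caps() returns 0 when IBS
 * is not supported, otherwise the IBS_CAPS_* bits above:
 *
 *	u32 caps = get_ibs_caps();
 *
 *	if (caps & IBS_CAPS_OPSAM)
 *		... IBS op (execution) sampling can be used ...
 */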

#ifdef CONFIG_PERF_EVENTS
extern void perf_events_lapic_init(void);

#define PERF_EVENT_INDEX_OFFSET			0

/*
 * Abuse bit 3 of the cpu eflags register to indicate proper PEBS IP fixups.
 * This flag is otherwise unused and ABI specified to be 0, so nobody should
 * care what we do with it.
 */
#define PERF_EFLAGS_EXACT	(1UL << 3)
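
/*
 * Sketch of how a consumer such as perf_misc_flags() can then report an
 * exact sample IP (PERF_RECORD_MISC_EXACT_IP is from <linux/perf_event.h>):
 *
 *	if (regs->flags & PERF_EFLAGS_EXACT)
 *		misc |= PERF_RECORD_MISC_EXACT_IP;
 */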

struct pt_regs;
extern unsigned long perf_instruction_pointer(struct pt_regs *regs);
extern unsigned long perf_misc_flags(struct pt_regs *regs);
#define perf_misc_flags(regs)	perf_misc_flags(regs)

#include <asm/stacktrace.h>

/*
 * We abuse bit 3 from flags to pass exact information, see perf_misc_flags
 * and the comment with PERF_EFLAGS_EXACT.
 */
#define perf_arch_fetch_caller_regs(regs, __ip)		{	\
	(regs)->ip = (__ip);					\
	(regs)->bp = caller_frame_pointer();			\
	(regs)->cs = __KERNEL_CS;				\
	(regs)->flags = 0;					\
	asm volatile(						\
		_ASM_MOV "%%"_ASM_SP ", %0\n"			\
		: "=m" ((regs)->sp)				\
		:: "memory"					\
	);							\
}
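
/*
 * Typical use is through a wrapper that records its own call site, e.g.
 * (sketch, with CALLER_ADDR0 from <linux/ftrace.h>):
 *
 *	struct pt_regs regs;
 *
 *	perf_arch_fetch_caller_regs(&regs, CALLER_ADDR0);
 */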

struct perf_guest_switch_msr {
	unsigned msr;
	u64 host, guest;
};

extern struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr);
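
/*
 * Sketch of a hypervisor-side consumer: .guest values are loaded on
 * VM-entry and .host values restored on VM-exit:
 *
 *	int i, nr;
 *	struct perf_guest_switch_msr *msrs = perf_guest_get_msrs(&nr);
 *
 *	for (i = 0; i < nr; i++)
 *		... program msrs[i].guest before entry, msrs[i].host after ...
 */
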
extern void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap);
#else
static inline struct perf_guest_switch_msr *perf_guest_get_msrs(int *nr)
{
	*nr = 0;
	return NULL;
}

static inline void perf_get_x86_pmu_capability(struct x86_pmu_capability *cap)
{
	memset(cap, 0, sizeof(*cap));
}

static inline void perf_events_lapic_init(void)	{ }
#endif

#endif /* _ASM_X86_PERF_EVENT_H */