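/*
 * perf support for a handful of free-running, read-only x86 MSRs: TSC,
 * APERF, MPERF, PPERF, the SMI count, PTSC and IRPERF.  Each one is
 * exposed as a simple counting event under the "msr" PMU.
 */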
#include <linux/perf_event.h>

enum perf_msr_id {
	PERF_MSR_TSC	= 0,
	PERF_MSR_APERF	= 1,
	PERF_MSR_MPERF	= 2,
	PERF_MSR_PPERF	= 3,
	PERF_MSR_SMI	= 4,
	PERF_MSR_PTSC	= 5,
	PERF_MSR_IRPERF	= 6,

	PERF_MSR_EVENT_MAX,
};

static bool test_aperfmperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_APERFMPERF);
}

static bool test_ptsc(int idx)
{
	return boot_cpu_has(X86_FEATURE_PTSC);
}

static bool test_irperf(int idx)
{
	return boot_cpu_has(X86_FEATURE_IRPERF);
}

static bool test_intel(int idx)
{
	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL ||
	    boot_cpu_data.x86 != 6)
		return false;

	switch (boot_cpu_data.x86_model) {
	case 30: /* 45nm Nehalem    */
	case 26: /* 45nm Nehalem-EP */
	case 46: /* 45nm Nehalem-EX */

	case 37: /* 32nm Westmere    */
	case 44: /* 32nm Westmere-EP */
	case 47: /* 32nm Westmere-EX */

	case 42: /* 32nm SandyBridge         */
	case 45: /* 32nm SandyBridge-E/EN/EP */

	case 58: /* 22nm IvyBridge       */
	case 62: /* 22nm IvyBridge-EP/EX */

	case 60: /* 22nm Haswell Core */
	case 63: /* 22nm Haswell Server */
	case 69: /* 22nm Haswell ULT */
	case 70: /* 22nm Haswell + GT3e (Intel Iris Pro graphics) */

	case 61: /* 14nm Broadwell Core-M */
	case 86: /* 14nm Broadwell Xeon D */
	case 71: /* 14nm Broadwell + GT3e (Intel Iris Pro graphics) */
	case 79: /* 14nm Broadwell Server */

	case 55: /* 22nm Atom "Silvermont"                */
	case 77: /* 22nm Atom "Silvermont Avoton/Rangely" */
	case 76: /* 14nm Atom "Airmont"                   */
		if (idx == PERF_MSR_SMI)
			return true;
		break;

	case 78: /* 14nm Skylake Mobile  */
	case 94: /* 14nm Skylake Desktop */
		if (idx == PERF_MSR_SMI || idx == PERF_MSR_PPERF)
			return true;
		break;
	}

	return false;
}

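/*
 * One entry per event: the MSR to read, the sysfs attribute that
 * advertises it, and an optional probe callback.  A NULL ->test()
 * (as for the TSC) means the counter is taken to be always present.
 */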
struct perf_msr {
	u64 msr;
	struct perf_pmu_events_attr *attr;
	bool (*test)(int idx);
};

PMU_EVENT_ATTR_STRING(tsc,    evattr_tsc,    "event=0x00");
PMU_EVENT_ATTR_STRING(aperf,  evattr_aperf,  "event=0x01");
PMU_EVENT_ATTR_STRING(mperf,  evattr_mperf,  "event=0x02");
PMU_EVENT_ATTR_STRING(pperf,  evattr_pperf,  "event=0x03");
PMU_EVENT_ATTR_STRING(smi,    evattr_smi,    "event=0x04");
PMU_EVENT_ATTR_STRING(ptsc,   evattr_ptsc,   "event=0x05");
PMU_EVENT_ATTR_STRING(irperf, evattr_irperf, "event=0x06");

static struct perf_msr msr[] = {
	[PERF_MSR_TSC]    = { 0,		&evattr_tsc,    NULL,            },
	[PERF_MSR_APERF]  = { MSR_IA32_APERF,	&evattr_aperf,  test_aperfmperf, },
	[PERF_MSR_MPERF]  = { MSR_IA32_MPERF,	&evattr_mperf,  test_aperfmperf, },
	[PERF_MSR_PPERF]  = { MSR_PPERF,	&evattr_pperf,  test_intel,      },
	[PERF_MSR_SMI]    = { MSR_SMI_COUNT,	&evattr_smi,    test_intel,      },
	[PERF_MSR_PTSC]   = { MSR_F15H_PTSC,	&evattr_ptsc,   test_ptsc,       },
	[PERF_MSR_IRPERF] = { MSR_F17H_IRPERF,	&evattr_irperf, test_irperf,     },
};

static struct attribute *events_attrs[PERF_MSR_EVENT_MAX + 1] = {
	NULL,
};

static struct attribute_group events_attr_group = {
	.name = "events",
	.attrs = events_attrs,
};

PMU_FORMAT_ATTR(event, "config:0-63");

static struct attribute *format_attrs[] = {
	&format_attr_event.attr,
	NULL,
};

static struct attribute_group format_attr_group = {
	.name = "format",
	.attrs = format_attrs,
};

static const struct attribute_group *attr_groups[] = {
	&events_attr_group,
	&format_attr_group,
	NULL,
};
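
/*
 * The attribute groups above make the events that survive probing (see
 * msr_init() below) visible under /sys/bus/event_source/devices/msr/events/,
 * so a perf tool that understands dynamic PMUs can count them with, for
 * example:
 *
 *	perf stat -e msr/tsc/,msr/smi/ -a sleep 1
 */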

static int msr_event_init(struct perf_event *event)
{
	u64 cfg = event->attr.config;

	if (event->attr.type != event->pmu->type)
		return -ENOENT;

	if (cfg >= PERF_MSR_EVENT_MAX)
		return -EINVAL;

	/* unsupported modes and filters */
	if (event->attr.exclude_user   ||
	    event->attr.exclude_kernel ||
	    event->attr.exclude_hv     ||
	    event->attr.exclude_idle   ||
	    event->attr.exclude_host   ||
	    event->attr.exclude_guest  ||
	    event->attr.sample_period) /* no sampling */
		return -EINVAL;

	if (!msr[cfg].attr)
		return -EINVAL;

	event->hw.idx = -1;
	event->hw.event_base = msr[cfg].msr;
	event->hw.config = cfg;

	return 0;
}

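/*
 * A zero event_base denotes the TSC event, which is read with RDTSC;
 * every other event reads its MSR directly.
 */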
static inline u64 msr_read_counter(struct perf_event *event)
{
	u64 now;

	if (event->hw.event_base)
		rdmsrl(event->hw.event_base, now);
	else
		rdtscll(now);

	return now;
}

static void msr_event_update(struct perf_event *event)
{
	u64 prev, now;
	s64 delta;

	/* Careful, an NMI might modify the previous event value. */
again:
	prev = local64_read(&event->hw.prev_count);
	now = msr_read_counter(event);

	if (local64_cmpxchg(&event->hw.prev_count, prev, now) != prev)
		goto again;

	delta = now - prev;
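	/*
	 * MSR_SMI_COUNT is only 32 bits wide; sign-extend the 32-bit
	 * delta so a wrap of the hardware counter does not show up as
	 * a huge bogus increment.
	 */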
	if (unlikely(event->hw.event_base == MSR_SMI_COUNT))
		delta = sign_extend64(delta, 31);

	local64_add(delta, &event->count);
}

static void msr_event_start(struct perf_event *event, int flags)
{
	u64 now;

	now = msr_read_counter(event);
	local64_set(&event->hw.prev_count, now);
}

static void msr_event_stop(struct perf_event *event, int flags)
{
	msr_event_update(event);
}

static void msr_event_del(struct perf_event *event, int flags)
{
	msr_event_stop(event, PERF_EF_UPDATE);
}

static int msr_event_add(struct perf_event *event, int flags)
{
	if (flags & PERF_EF_START)
		msr_event_start(event, flags);

	return 0;
}

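/*
 * None of these MSRs can raise an interrupt on overflow, hence
 * PERF_PMU_CAP_NO_INTERRUPT: the events can only be counted, never
 * sampled (msr_event_init() rejects a non-zero sample_period).
 */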
static struct pmu pmu_msr = {
	.task_ctx_nr	= perf_sw_context,
	.attr_groups	= attr_groups,
	.event_init	= msr_event_init,
	.add		= msr_event_add,
	.del		= msr_event_del,
	.start		= msr_event_start,
	.stop		= msr_event_stop,
	.read		= msr_event_update,
	.capabilities	= PERF_PMU_CAP_NO_INTERRUPT,
};

static int __init msr_init(void)
{
	int i, j = 0;

	if (!boot_cpu_has(X86_FEATURE_TSC)) {
		pr_cont("no MSR PMU driver.\n");
		return 0;
	}

	/* Probe the MSRs. */
	for (i = PERF_MSR_TSC + 1; i < PERF_MSR_EVENT_MAX; i++) {
		u64 val;

		/*
		 * Under virtualization you cannot tell whether a read-only
		 * MSR is actually present, so probe it with rdmsrl_safe().
		 */
		if (!msr[i].test(i) || rdmsrl_safe(msr[i].msr, &val))
			msr[i].attr = NULL;
	}

	/* List remaining MSRs in the sysfs attrs. */
	for (i = 0; i < PERF_MSR_EVENT_MAX; i++) {
		if (msr[i].attr)
			events_attrs[j++] = &msr[i].attr->attr.attr;
	}
	events_attrs[j] = NULL;

	perf_pmu_register(&pmu_msr, "msr", -1);

	return 0;
}
device_initcall(msr_init);