/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"

static struct kvm_arch_event_perf_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
	bool inexact;
} arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
};

/* mapping between fixed pmc index and arch_events array */
int fixed_pmc_events[] = {1, 0, 2};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	return pmu->counter_bitmask[pmc->type];
}

static inline bool pmc_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;
	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

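/*
 * Map a bit index from GLOBAL_CTRL/GLOBAL_STATUS to its counter: bits below
 * X86_PMC_IDX_FIXED are general-purpose counters, the rest are fixed counters.
 */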
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < X86_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
	else
		return get_fixed_pmc_idx(pmu, idx - X86_PMC_IDX_FIXED);
}

void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

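/*
 * irq_work callback, used when the overflow NMI did not interrupt guest
 * mode: delivering the PMI from here (outside NMI context) can also wake a
 * vcpu that is halted (see kvm_perf_overflow_intr() below).
 */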
static void trigger_pmi(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
			irq_work);
	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
			arch.pmu);

	kvm_deliver_pmi(vcpu);
}

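/*
 * Overflow callback for counters programmed without an interrupt: only
 * latch the overflow bit in GLOBAL_STATUS.
 */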
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
		struct perf_sample_data *data, struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
		kvm_perf_overflow(perf_event, data, regs);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
		/*
		 * Inject the PMI. If the vcpu was in guest mode during the
		 * NMI, the PMI can be injected on the next guest-mode entry.
		 * Otherwise we cannot be sure that the vcpu was not executing
		 * a hlt instruction at the time of the vmexit and will not
		 * re-enter guest mode until it is woken up. So we should wake
		 * it, but that is impossible from NMI context. Do it from irq
		 * work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

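/*
 * The guest-visible count is the value saved in pmc->counter plus whatever
 * the backing perf_event has accumulated since it was (re)programmed,
 * truncated to the counter's bit width.
 */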
static u64 read_pmc(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;

	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);

	/* FIXME: Scaling needed? */

	return counter & pmc_bitmask(pmc);
}

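/*
 * Release the backing perf_event, folding its count into pmc->counter first
 * so that a later read_pmc() still returns the up-to-date value.
 */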
static void stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = read_pmc(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

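/*
 * Create a host perf_event to back the guest counter. The sample period is
 * the distance to the next wrap of the guest counter, so the host event
 * overflows exactly when the guest counter would.
 */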
static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
		unsigned config, bool exclude_user, bool exclude_kernel,
		bool intr)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc->vcpu->arch.pmu.reprogram_pmi);
}

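/*
 * Translate an event select/unit mask pair into a generic perf hardware
 * event, but only if it is one of the architectural events advertised as
 * available by the guest's CPUID 0x0A.EBX bit vector.
 */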
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
				u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
		if (arch_events[i].eventsel == event_select
		    && arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(arch_events))
		return PERF_COUNT_HW_MAX;

	return arch_events[i].event_type;
}

static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK))) {
		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
					 unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	reprogram_counter(pmc, type, config,
			  !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			  !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			  eventsel & ARCH_PERFMON_EVENTSEL_INT);
}

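/*
 * en_pmi is one 4-bit field of MSR_CORE_PERF_FIXED_CTR_CTRL: bit 0 enables
 * counting in ring 0, bit 1 in ring 3, bit 3 requests a PMI on overflow.
 */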
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
{
	unsigned en = en_pmi & 0x3;
	bool pmi = en_pmi & 0x8;

	stop_counter(pmc);

	if (!en || !pmc_enabled(pmc))
		return;

	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			  arch_events[fixed_pmc_events[idx]].event_type,
			  !(en & 0x2), /* exclude user */
			  !(en & 0x1), /* exclude kernel */
			  pmi);
}

static inline u8 fixed_en_pmi(u64 ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 en_pmi = fixed_en_pmi(data, i);
		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);

		if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
			continue;

		reprogram_fixed_counter(pmc, en_pmi, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

static void reprogram_idx(struct kvm_pmu *pmu, int idx)
{
	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int fidx = idx - X86_PMC_IDX_FIXED;
		reprogram_fixed_counter(pmc,
				fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
	}
}

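/* Reprogram only the counters whose enable bit actually changed. */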
static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_idx(pmu, bit);
}

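/* Return true if the MSR is handled by the virtual PMU. */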
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
			|| get_fixed_pmc(pmu, msr);
		break;
	}
	return ret;
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_fixed_pmc(pmu, index))) {
			*data = read_pmc(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}
	return 1;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, u32 index, u64 data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
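		/*
		 * 0xfffffffffffff444 is the set of reserved bits: everything
		 * above the three 4-bit enable/PMI fields, plus each field's
		 * ANY-thread bit.  Writes that set a reserved bit are
		 * rejected.
		 */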
		if (!(data & 0xfffffffffffff444)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
			pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_fixed_pmc(pmu, index))) {
			data = (s64)(s32)data;
			pmc->counter += data - read_pmc(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
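			/* bits 32-63 and bit 21 of the event select are reserved */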
			if (!(data & 0xffffffff00200000ull)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}
	return 1;
}

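/*
 * Emulate RDPMC: bit 30 of the counter index selects the fixed counters and
 * bit 31 requests "fast" mode, which returns only the low 32 bits.
 */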
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool fast_mode = pmc & (1u << 31);
	bool fixed = pmc & (1u << 30);
	struct kvm_pmc *counters;
	u64 ctr;

	pmc &= ~(3u << 30);
	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
		return 1;
	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
		return 1;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	ctr = read_pmc(&counters[pmc]);
	if (fast_mode)
		ctr = (u32)ctr;
	*data = ctr;

	return 0;
}

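/*
 * (Re)derive the PMU configuration from the guest's CPUID leaf 0xA: PMU
 * version and counter counts/widths from EAX/EDX, and the set of available
 * architectural events from the EBX bit vector (a set bit there means the
 * event is not available).
 */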
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_cpuid_entry2 *entry;
	unsigned bitmap_len;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;

	pmu->version = entry->eax & 0xff;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
			X86_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] =
		((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
	bitmap_len = (entry->eax >> 24) & 0xff;
	pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);

	if (pmu->version == 1) {
		pmu->global_ctrl = (1 << pmu->nr_arch_gp_counters) - 1;
		return;
	}

	pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
			X86_PMC_MAX_FIXED);
	pmu->counter_bitmask[KVM_PMC_FIXED] =
		((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
	pmu->global_ctrl_mask = ~(((1 << pmu->nr_arch_gp_counters) - 1)
			| (((1ull << pmu->nr_arch_fixed_counters) - 1)
			   << X86_PMC_IDX_FIXED));
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	memset(pmu, 0, sizeof(*pmu));
	for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
	for (i = 0; i < X86_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + X86_PMC_IDX_FIXED;
	}
	init_irq_work(&pmu->irq_work, trigger_pmi);
	kvm_pmu_cpuid_update(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	irq_work_sync(&pmu->irq_work);
	for (i = 0; i < X86_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];
		stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < X86_PMC_MAX_FIXED; i++)
		stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

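/*
 * Called in vcpu context when KVM_REQ_PMU is pending: reprogram the counters
 * that were flagged in reprogram_pmi by the NMI-context overflow handler.
 */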
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_idx(pmu, bit);
	}
}