/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"

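/*
 * Intel architectural events, indexed by the CPUID 0x0A.EBX bit that
 * advertises them.  Each entry maps an (event_select, unit_mask) pair to
 * the generic perf hardware event used to back it on the host.
 */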
static struct kvm_arch_event_perf_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
	bool inexact;
} arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and arch_events array */
int fixed_pmc_events[] = {1, 0, 7};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	return pmu->counter_bitmask[pmc->type];
}

static inline bool pmc_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;
	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

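/*
 * Map a bit index in the GLOBAL_CTRL/GLOBAL_STATUS layout to a counter:
 * bits below INTEL_PMC_IDX_FIXED select general-purpose counters, higher
 * bits select fixed counters.
 */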
static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
	else
		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}

void kvm_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

static void trigger_pmi(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
			irq_work);
	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
			arch.pmu);

	kvm_deliver_pmi(vcpu);
}

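/*
 * Overflow callbacks for the backing perf events, called from the host PMI
 * (NMI context).  Both variants record the overflow in GLOBAL_STATUS and
 * request KVM_REQ_PMU; the test_and_set of the reprogram_pmi bit makes the
 * handling idempotent until kvm_handle_pmu_event() reprograms the counter
 * and clears the bit.  The _intr variant is used when the guest asked for
 * a PMI on overflow.
 */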
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
		struct perf_sample_data *data, struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
		/*
		 * Inject a PMI.  If the vcpu was in guest mode when the NMI
		 * arrived, the PMI can be injected on the next guest-mode
		 * entry.  Otherwise we can't be sure the vcpu wasn't executing
		 * a hlt instruction at the time of the vmexit, in which case
		 * it won't re-enter guest mode until it is woken up.  So we
		 * should wake it, but that is impossible from NMI context;
		 * do it from irq work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

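/*
 * Guest-visible counter value: the value saved when the counter was last
 * stopped, plus whatever the backing perf event has counted since,
 * truncated to the counter width exposed to the guest.
 */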
static u64 read_pmc(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;

	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);

	/* FIXME: Scaling needed? */

	return counter & pmc_bitmask(pmc);
}

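/* Fold the current count into pmc->counter and release the perf event. */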
static void stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = read_pmc(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

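/*
 * (Re)create the host perf event that backs a guest counter.  The event is
 * pinned, counts only while the guest is running (exclude_host), and its
 * sample period is chosen so that the host event overflows exactly when
 * the emulated counter would wrap from its current value.
 */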
static void reprogram_counter(struct kvm_pmc *pmc, u32 type,
		unsigned config, bool exclude_user, bool exclude_kernel,
		bool intr, bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};
	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp)
		attr.config |= HSW_IN_TX_CHECKPOINTED;

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm: pmu event creation failed %ld\n",
				PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc->vcpu->arch.pmu.reprogram_pmi);
}

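/*
 * Translate an event_select/unit_mask pair to a generic perf hardware
 * event.  Returns PERF_COUNT_HW_MAX if the pair is not one of the
 * architectural events, or if guest CPUID 0x0A.EBX marks the event as
 * unavailable.
 */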
static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
		u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
		if (arch_events[i].eventsel == event_select
				&& arch_events[i].unit_mask == unit_mask
				&& (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(arch_events))
		return PERF_COUNT_HW_MAX;

	return arch_events[i].event_type;
}

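/*
 * Called on a guest write to an EVNTSEL MSR.  A plain architectural event
 * is programmed as a generic hardware event; anything using edge, invert,
 * cmask or the Haswell TSX bits is passed through as a raw event.
 */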
static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
				unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	reprogram_counter(pmc, type, config,
			!(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			!(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			eventsel & ARCH_PERFMON_EVENTSEL_INT,
			(eventsel & HSW_IN_TX),
			(eventsel & HSW_IN_TX_CHECKPOINTED));
}

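/*
 * en_pmi is one 4-bit field of FIXED_CTR_CTRL: bit 0 enables ring-0
 * counting, bit 1 ring-3 counting, bit 3 asks for a PMI on overflow
 * (bit 2, AnyThread, is not emulated here).
 */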
static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
{
	unsigned en = en_pmi & 0x3;
	bool pmi = en_pmi & 0x8;

	stop_counter(pmc);

	if (!en || !pmc_enabled(pmc))
		return;

	reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			arch_events[fixed_pmc_events[idx]].event_type,
			!(en & 0x2), /* exclude user */
			!(en & 0x1), /* exclude kernel */
			pmi, false, false);
}

static inline u8 fixed_en_pmi(u64 ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 en_pmi = fixed_en_pmi(data, i);
		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);

		if (fixed_en_pmi(pmu->fixed_ctr_ctrl, i) == en_pmi)
			continue;

		reprogram_fixed_counter(pmc, en_pmi, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

static void reprogram_idx(struct kvm_pmu *pmu, int idx)
{
	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int fidx = idx - INTEL_PMC_IDX_FIXED;
		reprogram_fixed_counter(pmc,
				fixed_en_pmi(pmu->fixed_ctr_ctrl, fidx), fidx);
	}
}

static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_idx(pmu, bit);
}

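/*
 * Return true if the MSR belongs to the vPMU.  The global control MSRs
 * exist only when the guest's architectural PMU version is 2 or higher.
 */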
bool kvm_pmu_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
			|| get_fixed_pmc(pmu, msr);
		break;
	}
	return ret;
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_fixed_pmc(pmu, index))) {
			*data = read_pmc(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}
	return 1;
}

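/*
 * Handle a write to a PMU MSR.  Returns 0 if handled, non-zero to make the
 * caller inject #GP.  Guest writes to a counter are sign-extended from 32
 * bits, mirroring non-full-width counter writes on hardware; host-initiated
 * (ioctl) writes set all 64 bits.
 */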
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u32 index = msr_info->index;
	u64 data = msr_info->data;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_fixed_pmc(pmu, index))) {
			if (!msr_info->host_initiated)
				data = (s64)(s32)data;
			pmc->counter += data - read_pmc(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}
	return 1;
}

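/*
 * RDPMC emulation.  Bit 30 of the guest's ECX selects the fixed-counter
 * set; bit 31 selects "fast" mode, which returns only the low 32 bits.
 */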
int kvm_pmu_read_pmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool fast_mode = pmc & (1u << 31);
	bool fixed = pmc & (1u << 30);
	struct kvm_pmc *counters;
	u64 ctr;

	pmc &= ~(3u << 30);
	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
		return 1;
	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
		return 1;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	ctr = read_pmc(&counters[pmc]);
	if (fast_mode)
		ctr = (u32)ctr;
	*data = ctr;

	return 0;
}

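/*
 * (Re)configure the vPMU from the guest's CPUID leaf 0xA: PMU version,
 * number and width of GP and fixed counters, and the bitmap of available
 * architectural events.
 */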
void kvm_pmu_cpuid_update(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_cpuid_entry2 *entry;
	unsigned bitmap_len;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;

	pmu->version = entry->eax & 0xff;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min((int)(entry->eax >> 8) & 0xff,
			INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] =
		((u64)1 << ((entry->eax >> 16) & 0xff)) - 1;
	bitmap_len = (entry->eax >> 24) & 0xff;
	pmu->available_event_types = ~entry->ebx & ((1ull << bitmap_len) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters = min((int)(entry->edx & 0x1f),
				INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << ((entry->edx >> 5) & 0xff)) - 1;
	}

	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	/*
	 * X86_FEATURE_HLE/RTM are kernel feature-word indices, not CPUID bit
	 * masks, so test the guest's CPUID.7.0:EBX bits directly (HLE is
	 * bit 4, RTM is bit 11).
	 */
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & ((1u << 4) | (1u << 11))))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	memset(pmu, 0, sizeof(*pmu));
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
	init_irq_work(&pmu->irq_work, trigger_pmi);
	kvm_pmu_cpuid_update(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	irq_work_sync(&pmu->irq_work);
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];
		stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

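/*
 * Called in vcpu context on KVM_REQ_PMU.  Reprogram every counter flagged
 * in reprogram_pmi by an overflow handler; reprogramming re-arms the
 * backing perf event and clears the pending bit.
 */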
void kvm_handle_pmu_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_idx(pmu, bit);
	}
}