/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2011 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

static struct kvm_event_hw_type_mapping arch_events[] = {
	/* Index must match CPUID 0x0A.EBX bit vector */
	[0] = { 0x3c, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x3c, 0x01, PERF_COUNT_HW_BUS_CYCLES },
	[3] = { 0x2e, 0x4f, PERF_COUNT_HW_CACHE_REFERENCES },
	[4] = { 0x2e, 0x41, PERF_COUNT_HW_CACHE_MISSES },
	[5] = { 0xc4, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[6] = { 0xc5, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[7] = { 0x00, 0x30, PERF_COUNT_HW_REF_CPU_CYCLES },
};

/* mapping between fixed pmc index and arch_events array */
static int fixed_pmc_events[] = {1, 0, 7};

static bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;

	return pmu->counter_bitmask[pmc->type];
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	return test_bit(pmc->idx, (unsigned long *)&pmu->global_ctrl);
}

static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;
	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];
	return NULL;
}

static inline struct kvm_pmc *get_fixed_pmc_idx(struct kvm_pmu *pmu, int idx)
{
	return get_fixed_pmc(pmu, MSR_CORE_PERF_FIXED_CTR0 + idx);
}

static struct kvm_pmc *global_idx_to_pmc(struct kvm_pmu *pmu, int idx)
{
	if (idx < INTEL_PMC_IDX_FIXED)
		return get_gp_pmc(pmu, MSR_P6_EVNTSEL0 + idx, MSR_P6_EVNTSEL0);
	else
		return get_fixed_pmc_idx(pmu, idx - INTEL_PMC_IDX_FIXED);
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.apic)
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu,
					   irq_work);
	struct kvm_vcpu *vcpu = container_of(pmu, struct kvm_vcpu,
					     arch.pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

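/*
 * Host perf overflow callbacks run in NMI/IRQ context, so they only record
 * the overflow in reprogram_pmi and global_status and request KVM_REQ_PMU;
 * kvm_pmu_handle_event() reprograms the counter later from vcpu context.
 */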
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
		struct perf_sample_data *data, struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = &pmc->vcpu->arch.pmu;
	if (!test_and_set_bit(pmc->idx, (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
		/*
		 * Inject PMI. If the vcpu was in guest mode when the NMI
		 * arrived, the PMI can be injected on the next guest-mode
		 * entry. Otherwise we cannot be sure that the vcpu wasn't
		 * executing a hlt instruction at the time of the vmexit, in
		 * which case it will not re-enter guest mode until it is
		 * woken up. So we should wake it, but that is impossible
		 * from NMI context. Do it from irq work instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc->vcpu->arch.pmu.irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

static u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;

	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);

	/* FIXME: Scaling needed? */

	return counter & pmc_bitmask(pmc);
}

static void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

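/*
 * Back a guest counter with a host perf event. The sample period is set to
 * the number of events left before the guest counter wraps
 * ((-counter) & bitmask), so the host event overflows exactly when the
 * guest counter would overflow.
 */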
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user, bool exclude_kernel,
				  bool intr, bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};
	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp)
		attr.config |= HSW_IN_TX_CHECKPOINTED;

	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm: pmu event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long*)&pmc->vcpu->arch.pmu.reprogram_pmi);
}

static unsigned find_arch_event(struct kvm_pmu *pmu, u8 event_select,
				u8 unit_mask)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(arch_events); i++)
		if (arch_events[i].eventsel == event_select
		    && arch_events[i].unit_mask == unit_mask
		    && (pmu->available_event_types & (1 << i)))
			break;

	if (i == ARRAY_SIZE(arch_events))
		return PERF_COUNT_HW_MAX;

	return arch_events[i].event_type;
}

static void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = find_arch_event(&pmc->vcpu->arch.pmu, event_select,
					 unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}

static void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 en_pmi, int idx)
{
	unsigned en = en_pmi & 0x3;
	bool pmi = en_pmi & 0x8;

	pmc_stop_counter(pmc);

	if (!en || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      arch_events[fixed_pmc_events[idx]].event_type,
			      !(en & 0x2), /* exclude user */
			      !(en & 0x1), /* exclude kernel */
			      pmi, false, false);
}

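/*
 * MSR_CORE_PERF_FIXED_CTR_CTRL packs one 4-bit control field per fixed
 * counter: bit 0 enables counting in ring 0, bit 1 enables counting in
 * ring 3, and bit 3 enables the PMI on overflow.
 */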
static inline u8 fixed_ctrl_field(u64 ctrl, int idx)
{
	return (ctrl >> (idx * 4)) & 0xf;
}

static void reprogram_fixed_counters(struct kvm_pmu *pmu, u64 data)
{
	int i;

	for (i = 0; i < pmu->nr_arch_fixed_counters; i++) {
		u8 en_pmi = fixed_ctrl_field(data, i);
		struct kvm_pmc *pmc = get_fixed_pmc_idx(pmu, i);

		if (fixed_ctrl_field(pmu->fixed_ctr_ctrl, i) == en_pmi)
			continue;

		reprogram_fixed_counter(pmc, en_pmi, i);
	}

	pmu->fixed_ctr_ctrl = data;
}

static void reprogram_counter(struct kvm_pmu *pmu, int idx)
{
	struct kvm_pmc *pmc = global_idx_to_pmc(pmu, idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int fidx = idx - INTEL_PMC_IDX_FIXED;
		reprogram_fixed_counter(pmc,
				fixed_ctrl_field(pmu->fixed_ctr_ctrl, fidx), fidx);
	}
}

static void global_ctrl_changed(struct kvm_pmu *pmu, u64 data)
{
	int bit;
	u64 diff = pmu->global_ctrl ^ data;

	pmu->global_ctrl = data;

	for_each_set_bit(bit, (unsigned long *)&diff, X86_PMC_IDX_MAX)
		reprogram_counter(pmu, bit);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int ret;

	switch (msr) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
	case MSR_CORE_PERF_GLOBAL_STATUS:
	case MSR_CORE_PERF_GLOBAL_CTRL:
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		ret = pmu->version > 1;
		break;
	default:
		ret = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0)
			|| get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0)
			|| get_fixed_pmc(pmu, msr);
		break;
	}
	return ret;
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 index, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		*data = pmu->fixed_ctr_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		*data = pmu->global_status;
		return 0;
	case MSR_CORE_PERF_GLOBAL_CTRL:
		*data = pmu->global_ctrl;
		return 0;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		*data = pmu->global_ovf_ctrl;
		return 0;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_fixed_pmc(pmu, index))) {
			*data = pmc_read_counter(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			*data = pmc->eventsel;
			return 0;
		}
	}
	return 1;
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_pmc *pmc;
	u32 index = msr_info->index;
	u64 data = msr_info->data;

	switch (index) {
	case MSR_CORE_PERF_FIXED_CTR_CTRL:
		if (pmu->fixed_ctr_ctrl == data)
			return 0;
		if (!(data & 0xfffffffffffff444ull)) {
			reprogram_fixed_counters(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_STATUS:
		if (msr_info->host_initiated) {
			pmu->global_status = data;
			return 0;
		}
		break; /* RO MSR */
	case MSR_CORE_PERF_GLOBAL_CTRL:
		if (pmu->global_ctrl == data)
			return 0;
		if (!(data & pmu->global_ctrl_mask)) {
			global_ctrl_changed(pmu, data);
			return 0;
		}
		break;
	case MSR_CORE_PERF_GLOBAL_OVF_CTRL:
		if (!(data & (pmu->global_ctrl_mask & ~(3ull<<62)))) {
			if (!msr_info->host_initiated)
				pmu->global_status &= ~data;
			pmu->global_ovf_ctrl = data;
			return 0;
		}
		break;
	default:
		if ((pmc = get_gp_pmc(pmu, index, MSR_IA32_PERFCTR0)) ||
		    (pmc = get_fixed_pmc(pmu, index))) {
			if (!msr_info->host_initiated)
				data = (s64)(s32)data;
			pmc->counter += data - pmc_read_counter(pmc);
			return 0;
		} else if ((pmc = get_gp_pmc(pmu, index, MSR_P6_EVNTSEL0))) {
			if (data == pmc->eventsel)
				return 0;
			if (!(data & pmu->reserved_bits)) {
				reprogram_gp_counter(pmc, data);
				return 0;
			}
		}
	}
	return 1;
}

int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned pmc)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool fixed = pmc & (1u << 30);
	pmc &= ~(3u << 30);
	return (!fixed && pmc >= pmu->nr_arch_gp_counters) ||
		(fixed && pmc >= pmu->nr_arch_fixed_counters);
}

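/*
 * Emulate RDPMC: bit 30 of the index selects the fixed-counter space,
 * bit 31 requests a "fast" read that returns only the low 32 bits of
 * the counter.
 */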
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	bool fast_mode = pmc & (1u << 31);
	bool fixed = pmc & (1u << 30);
	struct kvm_pmc *counters;
	u64 ctr;

	pmc &= ~(3u << 30);
	if (!fixed && pmc >= pmu->nr_arch_gp_counters)
		return 1;
	if (fixed && pmc >= pmu->nr_arch_fixed_counters)
		return 1;
	counters = fixed ? pmu->fixed_counters : pmu->gp_counters;
	ctr = pmc_read_counter(&counters[pmc]);
	if (fast_mode)
		ctr = (u32)ctr;
	*data = ctr;

	return 0;
}

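/*
 * Derive the vPMU configuration from the guest's CPUID leaf 0xA: EAX
 * describes the PMU version, the number and width of GP counters and the
 * architectural-event mask length, EBX flags unavailable events, and EDX
 * describes the fixed counters.
 */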
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	struct kvm_cpuid_entry2 *entry;
	union cpuid10_eax eax;
	union cpuid10_edx edx;

	pmu->nr_arch_gp_counters = 0;
	pmu->nr_arch_fixed_counters = 0;
	pmu->counter_bitmask[KVM_PMC_GP] = 0;
	pmu->counter_bitmask[KVM_PMC_FIXED] = 0;
	pmu->version = 0;
	pmu->reserved_bits = 0xffffffff00200000ull;

	entry = kvm_find_cpuid_entry(vcpu, 0xa, 0);
	if (!entry)
		return;
	eax.full = entry->eax;
	edx.full = entry->edx;

	pmu->version = eax.split.version_id;
	if (!pmu->version)
		return;

	pmu->nr_arch_gp_counters = min_t(int, eax.split.num_counters,
					 INTEL_PMC_MAX_GENERIC);
	pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << eax.split.bit_width) - 1;
	pmu->available_event_types = ~entry->ebx &
					((1ull << eax.split.mask_length) - 1);

	if (pmu->version == 1) {
		pmu->nr_arch_fixed_counters = 0;
	} else {
		pmu->nr_arch_fixed_counters =
			min_t(int, edx.split.num_counters_fixed,
			      INTEL_PMC_MAX_FIXED);
		pmu->counter_bitmask[KVM_PMC_FIXED] =
			((u64)1 << edx.split.bit_width_fixed) - 1;
	}

	pmu->global_ctrl = ((1 << pmu->nr_arch_gp_counters) - 1) |
		(((1ull << pmu->nr_arch_fixed_counters) - 1) << INTEL_PMC_IDX_FIXED);
	pmu->global_ctrl_mask = ~pmu->global_ctrl;

	entry = kvm_find_cpuid_entry(vcpu, 7, 0);
	if (entry &&
	    (boot_cpu_has(X86_FEATURE_HLE) || boot_cpu_has(X86_FEATURE_RTM)) &&
	    (entry->ebx & (X86_FEATURE_HLE|X86_FEATURE_RTM)))
		pmu->reserved_bits ^= HSW_IN_TX|HSW_IN_TX_CHECKPOINTED;
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	int i;
	struct kvm_pmu *pmu = &vcpu->arch.pmu;

	memset(pmu, 0, sizeof(*pmu));
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		pmu->gp_counters[i].type = KVM_PMC_GP;
		pmu->gp_counters[i].vcpu = vcpu;
		pmu->gp_counters[i].idx = i;
	}
	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++) {
		pmu->fixed_counters[i].type = KVM_PMC_FIXED;
		pmu->fixed_counters[i].vcpu = vcpu;
		pmu->fixed_counters[i].idx = i + INTEL_PMC_IDX_FIXED;
	}
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	int i;

	irq_work_sync(&pmu->irq_work);
	for (i = 0; i < INTEL_PMC_MAX_GENERIC; i++) {
		struct kvm_pmc *pmc = &pmu->gp_counters[i];
		pmc_stop_counter(pmc);
		pmc->counter = pmc->eventsel = 0;
	}

	for (i = 0; i < INTEL_PMC_MAX_FIXED; i++)
		pmc_stop_counter(&pmu->fixed_counters[i]);

	pmu->fixed_ctr_ctrl = pmu->global_ctrl = pmu->global_status =
		pmu->global_ovf_ctrl = 0;
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}

void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = &vcpu->arch.pmu;
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = global_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}