/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC
 *      instruction. For instance AMD RDPMC uses 0000_0003h in ECX to
 *      access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
 *      mechanism, except that it also supports fixed counters. idx can
 *      be used as an index into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc and
 *      perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */

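/*
 * Illustrative example (not used by the code): on Intel, fixed counter 1
 * is reachable in all three ways: via its msr (MSR_CORE_PERF_FIXED_CTR1,
 * 0x30a), via the RDPMC idx 4000_0001h (bit 30 selects the fixed-counter
 * class), and via the global pmc INTEL_PMC_IDX_FIXED + 1.
 */
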
static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
        struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
        struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

        kvm_pmu_deliver_pmi(vcpu);
}

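/*
 * Overflow callback used when the guest did not ask for an interrupt on
 * overflow (ARCH_PERFMON_EVENTSEL_INT clear): latch the overflow in
 * global_status and let the KVM_REQ_PMU handler reprogram the counter.
 */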
static void kvm_perf_overflow(struct perf_event *perf_event,
                              struct perf_sample_data *data,
                              struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!test_and_set_bit(pmc->idx,
                              (unsigned long *)&pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
        }
}

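/*
 * Overflow callback used when the guest requested a PMI: in addition to
 * latching the overflow, deliver (or queue delivery of) a PMI to the
 * guest.
 */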
static void kvm_perf_overflow_intr(struct perf_event *perf_event,
                                   struct perf_sample_data *data,
                                   struct pt_regs *regs)
{
        struct kvm_pmc *pmc = perf_event->overflow_handler_context;
        struct kvm_pmu *pmu = pmc_to_pmu(pmc);

        if (!test_and_set_bit(pmc->idx,
                              (unsigned long *)&pmu->reprogram_pmi)) {
                __set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
                kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

                /*
                 * Inject PMI. If the vcpu was in guest mode during the
                 * NMI, the PMI can be injected on guest-mode re-entry.
                 * Otherwise we can't be sure that the vcpu wasn't
                 * executing the hlt instruction at the time of the vmexit
                 * and is not going to re-enter guest mode until woken up.
                 * So we should wake it, but this is impossible from NMI
                 * context. Do it from irq work instead.
                 */
                if (!kvm_is_in_guest())
                        irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
                else
                        kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
        }
}

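/*
 * Back a guest counter with a host perf event. The event is created
 * pinned and counting down from the current counter value, so that host
 * perf calls back into KVM when the guest counter would wrap.
 */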
static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
                                  unsigned config, bool exclude_user,
                                  bool exclude_kernel, bool intr,
                                  bool in_tx, bool in_tx_cp)
{
        struct perf_event *event;
        struct perf_event_attr attr = {
                .type = type,
                .size = sizeof(attr),
                .pinned = true,
                .exclude_idle = true,
                .exclude_host = 1,
                .exclude_user = exclude_user,
                .exclude_kernel = exclude_kernel,
                .config = config,
        };

        attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

        if (in_tx)
                attr.config |= HSW_IN_TX;
        if (in_tx_cp) {
                /*
                 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
                 * period. Just clear the sample period so at least
                 * allocating the counter doesn't fail.
                 */
                attr.sample_period = 0;
                attr.config |= HSW_IN_TX_CHECKPOINTED;
        }

        event = perf_event_create_kernel_counter(&attr, -1, current,
                                                 intr ? kvm_perf_overflow_intr :
                                                 kvm_perf_overflow, pmc);
        if (IS_ERR(event)) {
                printk_once("kvm_pmu: event creation failed %ld\n",
                            PTR_ERR(event));
                return;
        }

        pmc->perf_event = event;
        clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}

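/*
 * (Re)program a gp counter from its guest eventsel MSR value. If the
 * event matches one of the generic architectural events, it is requested
 * from host perf as PERF_TYPE_HARDWARE; otherwise the raw encoding is
 * passed through as PERF_TYPE_RAW.
 */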
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
        unsigned config, type = PERF_TYPE_RAW;
        u8 event_select, unit_mask;

        if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
                printk_once("kvm pmu: pin control bit is ignored\n");

        pmc->eventsel = eventsel;

        pmc_stop_counter(pmc);

        if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
                return;

        event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
        unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

        if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
                          ARCH_PERFMON_EVENTSEL_INV |
                          ARCH_PERFMON_EVENTSEL_CMASK |
                          HSW_IN_TX |
                          HSW_IN_TX_CHECKPOINTED))) {
                config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
                                                               event_select,
                                                               unit_mask);
                if (config != PERF_COUNT_HW_MAX)
                        type = PERF_TYPE_HARDWARE;
        }

        if (type == PERF_TYPE_RAW)
                config = eventsel & X86_RAW_EVENT_MASK;

        pmc_reprogram_counter(pmc, type, config,
                              !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
                              !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
                              eventsel & ARCH_PERFMON_EVENTSEL_INT,
                              (eventsel & HSW_IN_TX),
                              (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

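/*
 * (Re)program a fixed counter from its 4-bit field in
 * MSR_CORE_PERF_FIXED_CTR_CTRL: bits 0-1 select the privilege levels to
 * count (0 disables the counter, 1 counts OS, 2 counts user, 3 counts
 * both) and bit 3 enables a PMI on overflow.
 */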
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
        unsigned en_field = ctrl & 0x3;
        bool pmi = ctrl & 0x8;

        pmc_stop_counter(pmc);

        if (!en_field || !pmc_is_enabled(pmc))
                return;

        pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
                              kvm_x86_ops->pmu_ops->find_fixed_event(idx),
                              !(en_field & 0x2), /* exclude user */
                              !(en_field & 0x1), /* exclude kernel */
                              pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

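/*
 * (Re)program the counter with global index @pmc_idx from its current
 * guest-visible configuration, dispatching on whether it is a gp or a
 * fixed counter.
 */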
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
        struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

        if (!pmc)
                return;

        if (pmc_is_gp(pmc))
                reprogram_gp_counter(pmc, pmc->eventsel);
        else {
                int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
                u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

                reprogram_fixed_counter(pmc, ctrl, idx);
        }
}
EXPORT_SYMBOL_GPL(reprogram_counter);

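/*
 * Process a deferred KVM_REQ_PMU request: reprogram every counter whose
 * bit is set in reprogram_pmi, and clear stale bits for counters that no
 * longer have a backing perf event.
 */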
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
        u64 bitmask;
        int bit;

        bitmask = pmu->reprogram_pmi;

        for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
                struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

                if (unlikely(!pmc || !pmc->perf_event)) {
                        clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
                        continue;
                }

                reprogram_counter(pmu, bit);
        }
}

/* Check whether idx is a valid index for accessing the PMU. */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
        return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
}

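/*
 * Emulate RDPMC: translate the guest-supplied ECX value into a counter
 * and return its current value. Bit 31 of the index requests "fast"
 * mode, which truncates the result to 32 bits.
 */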
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
        bool fast_mode = idx & (1u << 31);
        struct kvm_pmc *pmc;
        u64 ctr_val;

        pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
        if (!pmc)
                return 1;

        ctr_val = pmc_read_counter(pmc);
        if (fast_mode)
                ctr_val = (u32)ctr_val;

        *data = ctr_val;
        return 0;
}

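/* Deliver a PMI to the guest via the local APIC's LVTPC entry. */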
void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
        if (lapic_in_kernel(vcpu))
                kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

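/*
 * The MSR accessors below simply dispatch to the vendor-specific (Intel
 * or AMD) implementation via kvm_x86_ops->pmu_ops.
 */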
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
        return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
        return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}

/*
 * Refresh PMU settings. This function is generally called when the
 * underlying settings change (such as a change to the guest's PMU CPUID
 * leaf), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
        kvm_x86_ops->pmu_ops->refresh(vcpu);
}

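/*
 * Flush pending PMI irq_work, then return the vcpu's PMU to its
 * power-on state.
 */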
void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        irq_work_sync(&pmu->irq_work);
        kvm_x86_ops->pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
        struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

        memset(pmu, 0, sizeof(*pmu));
        kvm_x86_ops->pmu_ops->init(vcpu);
        init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
        kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_pmu_reset(vcpu);
}