/*
 * Kernel-based Virtual Machine -- Performance Monitoring Unit support
 *
 * Copyright 2015 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Avi Kivity   <avi@redhat.com>
 *   Gleb Natapov <gleb@redhat.com>
 *   Wei Huang    <wei@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/types.h>
#include <linux/kvm_host.h>
#include <linux/perf_event.h>
#include <asm/perf_event.h>
#include "x86.h"
#include "cpuid.h"
#include "lapic.h"
#include "pmu.h"

/* NOTE:
 * - Each perf counter is defined as "struct kvm_pmc";
 * - There are two types of perf counters: general purpose (gp) and fixed.
 *   gp counters are stored in gp_counters[] and fixed counters are stored
 *   in fixed_counters[] respectively. Both of them are part of "struct
 *   kvm_pmu";
 * - pmu.c understands the difference between gp counters and fixed counters.
 *   However, AMD doesn't support fixed counters;
 * - There are three types of index to access perf counters (PMC):
 *   1. MSR (named msr): For example Intel has MSR_IA32_PERFCTRn and AMD
 *      has MSR_K7_PERFCTRn.
 *   2. MSR Index (named idx): This is normally used by the RDPMC
 *      instruction. For instance AMD's RDPMC instruction uses 0000_0003h
 *      in ECX to access C001_0007h (MSR_K7_PERFCTR3). Intel has a similar
 *      mechanism, except that it also supports fixed counters. idx can be
 *      used as an index into the gp and fixed counters.
 *   3. Global PMC Index (named pmc): pmc is an index specific to PMU
 *      code. Each pmc, stored in the kvm_pmc.idx field, is unique across
 *      all perf counters (both gp and fixed). The mapping between pmc
 *      and perf counters is as follows:
 *      * Intel: [0 .. INTEL_PMC_MAX_GENERIC-1] <=> gp counters
 *               [INTEL_PMC_IDX_FIXED .. INTEL_PMC_IDX_FIXED + 2] <=> fixed
 *      * AMD:   [0 .. AMD64_NUM_COUNTERS-1] <=> gp counters
 */
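
/* A concrete illustration of the mapping above (assuming the usual value
 * INTEL_PMC_IDX_FIXED == 32 from asm/perf_event.h, and the architectural
 * RDPMC encoding in which ECX bit 30 selects the fixed-counter set): on an
 * Intel guest, gp counter 2 has pmc->idx == 2 and RDPMC index 2, while
 * fixed counter 1 has pmc->idx == 33 and RDPMC index 0x40000001.
 */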

static void kvm_pmi_trigger_fn(struct irq_work *irq_work)
{
	struct kvm_pmu *pmu = container_of(irq_work, struct kvm_pmu, irq_work);
	struct kvm_vcpu *vcpu = pmu_to_vcpu(pmu);

	kvm_pmu_deliver_pmi(vcpu);
}

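/* Overflow callback used when the guest did not ask for a PMI. It may run
 * in NMI context, so defer the real work: record the overflow in
 * global_status and reprogram_pmi, and let kvm_pmu_handle_event() finish
 * the job when KVM_REQ_PMU is processed.
 */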
static void kvm_perf_overflow(struct perf_event *perf_event,
			      struct perf_sample_data *data,
			      struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);
	}
}

static void kvm_perf_overflow_intr(struct perf_event *perf_event,
				   struct perf_sample_data *data,
				   struct pt_regs *regs)
{
	struct kvm_pmc *pmc = perf_event->overflow_handler_context;
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	if (!test_and_set_bit(pmc->idx,
			      (unsigned long *)&pmu->reprogram_pmi)) {
		__set_bit(pmc->idx, (unsigned long *)&pmu->global_status);
		kvm_make_request(KVM_REQ_PMU, pmc->vcpu);

		/*
		 * Inject PMI. If the vcpu was in guest mode during the NMI,
		 * the PMI can be injected on guest mode re-entry. Otherwise
		 * we can't be sure the vcpu wasn't executing the hlt
		 * instruction at the time of the vmexit and won't re-enter
		 * guest mode until woken up. So we should wake it, but that
		 * is impossible from NMI context. Do it from irq work
		 * instead.
		 */
		if (!kvm_is_in_guest())
			irq_work_queue(&pmc_to_pmu(pmc)->irq_work);
		else
			kvm_make_request(KVM_REQ_PMI, pmc->vcpu);
	}
}

static void pmc_reprogram_counter(struct kvm_pmc *pmc, u32 type,
				  unsigned config, bool exclude_user,
				  bool exclude_kernel, bool intr,
				  bool in_tx, bool in_tx_cp)
{
	struct perf_event *event;
	struct perf_event_attr attr = {
		.type = type,
		.size = sizeof(attr),
		.pinned = true,
		.exclude_idle = true,
		.exclude_host = 1,
		.exclude_user = exclude_user,
		.exclude_kernel = exclude_kernel,
		.config = config,
	};

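	/* A perf event counts up from 0 and fires its overflow callback
	 * after sample_period increments, so program the period to the
	 * distance from the current virtual counter value to its wrap
	 * point: the host event then overflows exactly when the guest
	 * counter would.
	 */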
	attr.sample_period = (-pmc->counter) & pmc_bitmask(pmc);

	if (in_tx)
		attr.config |= HSW_IN_TX;
	if (in_tx_cp) {
		/*
		 * HSW_IN_TX_CHECKPOINTED is not supported with nonzero
		 * period. Just clear the sample period so at least
		 * allocating the counter doesn't fail.
		 */
		attr.sample_period = 0;
		attr.config |= HSW_IN_TX_CHECKPOINTED;
	}

	event = perf_event_create_kernel_counter(&attr, -1, current,
						 intr ? kvm_perf_overflow_intr :
						 kvm_perf_overflow, pmc);
	if (IS_ERR(event)) {
		printk_once("kvm_pmu: event creation failed %ld\n",
			    PTR_ERR(event));
		return;
	}

	pmc->perf_event = event;
	clear_bit(pmc->idx, (unsigned long *)&pmc_to_pmu(pmc)->reprogram_pmi);
}

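/* Program a gp counter from the guest's architectural EVENTSEL MSR value.
 * As an illustrative example (not taken from this file): eventsel ==
 * 0x5300c0 decodes to event_select 0xc0, unit_mask 0x00, with the USR, OS,
 * INT and EN bits set -- i.e. count "instructions retired" in both user
 * and kernel mode and raise a PMI on overflow.
 */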
void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel)
{
	unsigned config, type = PERF_TYPE_RAW;
	u8 event_select, unit_mask;

	if (eventsel & ARCH_PERFMON_EVENTSEL_PIN_CONTROL)
		printk_once("kvm pmu: pin control bit is ignored\n");

	pmc->eventsel = eventsel;

	pmc_stop_counter(pmc);

	if (!(eventsel & ARCH_PERFMON_EVENTSEL_ENABLE) || !pmc_is_enabled(pmc))
		return;

	event_select = eventsel & ARCH_PERFMON_EVENTSEL_EVENT;
	unit_mask = (eventsel & ARCH_PERFMON_EVENTSEL_UMASK) >> 8;

	if (!(eventsel & (ARCH_PERFMON_EVENTSEL_EDGE |
			  ARCH_PERFMON_EVENTSEL_INV |
			  ARCH_PERFMON_EVENTSEL_CMASK |
			  HSW_IN_TX |
			  HSW_IN_TX_CHECKPOINTED))) {
		config = kvm_x86_ops->pmu_ops->find_arch_event(pmc_to_pmu(pmc),
							       event_select,
							       unit_mask);
		if (config != PERF_COUNT_HW_MAX)
			type = PERF_TYPE_HARDWARE;
	}

	if (type == PERF_TYPE_RAW)
		config = eventsel & X86_RAW_EVENT_MASK;

	pmc_reprogram_counter(pmc, type, config,
			      !(eventsel & ARCH_PERFMON_EVENTSEL_USR),
			      !(eventsel & ARCH_PERFMON_EVENTSEL_OS),
			      eventsel & ARCH_PERFMON_EVENTSEL_INT,
			      (eventsel & HSW_IN_TX),
			      (eventsel & HSW_IN_TX_CHECKPOINTED));
}
EXPORT_SYMBOL_GPL(reprogram_gp_counter);

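/* Program a fixed counter from its 4-bit field in the guest's fixed counter
 * control MSR: bits [1:0] select the rings to count (0 = disabled, 1 = OS,
 * 2 = USR, 3 = both) and bit 3 requests a PMI on overflow, matching the
 * en_field/pmi decoding below.
 */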
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int idx)
{
	unsigned en_field = ctrl & 0x3;
	bool pmi = ctrl & 0x8;

	pmc_stop_counter(pmc);

	if (!en_field || !pmc_is_enabled(pmc))
		return;

	pmc_reprogram_counter(pmc, PERF_TYPE_HARDWARE,
			      kvm_x86_ops->pmu_ops->find_fixed_event(idx),
			      !(en_field & 0x2), /* exclude user */
			      !(en_field & 0x1), /* exclude kernel */
			      pmi, false, false);
}
EXPORT_SYMBOL_GPL(reprogram_fixed_counter);

void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx)
{
	struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, pmc_idx);

	if (!pmc)
		return;

	if (pmc_is_gp(pmc))
		reprogram_gp_counter(pmc, pmc->eventsel);
	else {
		int idx = pmc_idx - INTEL_PMC_IDX_FIXED;
		u8 ctrl = fixed_ctrl_field(pmu->fixed_ctr_ctrl, idx);

		reprogram_fixed_counter(pmc, ctrl, idx);
	}
}
EXPORT_SYMBOL_GPL(reprogram_counter);

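/* Called when the vcpu processes KVM_REQ_PMU: reprogram each counter whose
 * bit the overflow callbacks set in reprogram_pmi, now that we are in a
 * context where perf events may safely be recreated.
 */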
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);
	u64 bitmask;
	int bit;

	bitmask = pmu->reprogram_pmi;

	for_each_set_bit(bit, (unsigned long *)&bitmask, X86_PMC_IDX_MAX) {
		struct kvm_pmc *pmc = kvm_x86_ops->pmu_ops->pmc_idx_to_pmc(pmu, bit);

		if (unlikely(!pmc || !pmc->perf_event)) {
			clear_bit(bit, (unsigned long *)&pmu->reprogram_pmi);
			continue;
		}

		reprogram_counter(pmu, bit);
	}
}

/* check if idx is a valid index to access PMU */
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr_idx(vcpu, idx);
}

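/* Emulate RDPMC: idx comes from the guest's ECX. When bit 31 is set the
 * guest asked for a "fast" read, which returns only the low 32 bits of
 * the counter.
 */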
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned idx, u64 *data)
{
	bool fast_mode = idx & (1u << 31);
	struct kvm_pmc *pmc;
	u64 ctr_val;

	pmc = kvm_x86_ops->pmu_ops->msr_idx_to_pmc(vcpu, idx);
	if (!pmc)
		return 1;

	ctr_val = pmc_read_counter(pmc);
	if (fast_mode)
		ctr_val = (u32)ctr_val;

	*data = ctr_val;
	return 0;
}

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu)
{
	if (lapic_in_kernel(vcpu))
		kvm_apic_local_deliver(vcpu->arch.apic, APIC_LVTPC);
}

bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr)
{
	return kvm_x86_ops->pmu_ops->is_valid_msr(vcpu, msr);
}

int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data)
{
	return kvm_x86_ops->pmu_ops->get_msr(vcpu, msr, data);
}

int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
	return kvm_x86_ops->pmu_ops->set_msr(vcpu, msr_info);
}

/* refresh PMU settings. This function is generally called when the
 * underlying settings change (such as the guest's PMU CPUID being
 * updated), which should rarely happen.
 */
void kvm_pmu_refresh(struct kvm_vcpu *vcpu)
{
	kvm_x86_ops->pmu_ops->refresh(vcpu);
}

void kvm_pmu_reset(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	irq_work_sync(&pmu->irq_work);
	kvm_x86_ops->pmu_ops->reset(vcpu);
}

void kvm_pmu_init(struct kvm_vcpu *vcpu)
{
	struct kvm_pmu *pmu = vcpu_to_pmu(vcpu);

	memset(pmu, 0, sizeof(*pmu));
	kvm_x86_ops->pmu_ops->init(vcpu);
	init_irq_work(&pmu->irq_work, kvm_pmi_trigger_fn);
	kvm_pmu_refresh(vcpu);
}

void kvm_pmu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_pmu_reset(vcpu);
}