Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 1 | /* |
| 2 | * KVM PMU support for AMD |
| 3 | * |
| 4 | * Copyright 2015, Red Hat, Inc. and/or its affiliates. |
| 5 | * |
| 6 | * Author: |
| 7 | * Wei Huang <wei@redhat.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 10 | * the COPYING file in the top-level directory. |
| 11 | * |
| 12 | * Implementation is based on pmu_intel.c file |
| 13 | */ |
| 14 | #include <linux/types.h> |
| 15 | #include <linux/kvm_host.h> |
| 16 | #include <linux/perf_event.h> |
| 17 | #include "x86.h" |
| 18 | #include "cpuid.h" |
| 19 | #include "lapic.h" |
| 20 | #include "pmu.h" |
| 21 | |
/* duplicated from amd_perfmon_event_map, K7 and above should work. */
/*
 * Each entry maps an AMD (event-select, unit-mask) pair to the generic
 * perf hardware event it implements; used by amd_find_arch_event() to
 * translate a guest's EVNTSEL programming into a perf_event type.
 */
static struct kvm_event_hw_type_mapping amd_event_mapping[] = {
	/* { eventsel, unit_mask, generic perf hw event id } */
	[0] = { 0x76, 0x00, PERF_COUNT_HW_CPU_CYCLES },
	[1] = { 0xc0, 0x00, PERF_COUNT_HW_INSTRUCTIONS },
	[2] = { 0x80, 0x00, PERF_COUNT_HW_CACHE_REFERENCES },
	[3] = { 0x81, 0x00, PERF_COUNT_HW_CACHE_MISSES },
	[4] = { 0xc2, 0x00, PERF_COUNT_HW_BRANCH_INSTRUCTIONS },
	[5] = { 0xc3, 0x00, PERF_COUNT_HW_BRANCH_MISSES },
	[6] = { 0xd0, 0x00, PERF_COUNT_HW_STALLED_CYCLES_FRONTEND },
	[7] = { 0xd1, 0x00, PERF_COUNT_HW_STALLED_CYCLES_BACKEND },
};
| 33 | |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 34 | static unsigned amd_find_arch_event(struct kvm_pmu *pmu, |
| 35 | u8 event_select, |
| 36 | u8 unit_mask) |
| 37 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 38 | int i; |
| 39 | |
| 40 | for (i = 0; i < ARRAY_SIZE(amd_event_mapping); i++) |
| 41 | if (amd_event_mapping[i].eventsel == event_select |
| 42 | && amd_event_mapping[i].unit_mask == unit_mask) |
| 43 | break; |
| 44 | |
| 45 | if (i == ARRAY_SIZE(amd_event_mapping)) |
| 46 | return PERF_COUNT_HW_MAX; |
| 47 | |
| 48 | return amd_event_mapping[i].event_type; |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 49 | } |
| 50 | |
| 51 | /* return PERF_COUNT_HW_MAX as AMD doesn't have fixed events */ |
| 52 | static unsigned amd_find_fixed_event(int idx) |
| 53 | { |
| 54 | return PERF_COUNT_HW_MAX; |
| 55 | } |
| 56 | |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 57 | /* check if a PMC is enabled by comparing it against global_ctrl bits. Because |
| 58 | * AMD CPU doesn't have global_ctrl MSR, all PMCs are enabled (return TRUE). |
| 59 | */ |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 60 | static bool amd_pmc_is_enabled(struct kvm_pmc *pmc) |
| 61 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 62 | return true; |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 63 | } |
| 64 | |
| 65 | static struct kvm_pmc *amd_pmc_idx_to_pmc(struct kvm_pmu *pmu, int pmc_idx) |
| 66 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 67 | return get_gp_pmc(pmu, MSR_K7_EVNTSEL0 + pmc_idx, MSR_K7_EVNTSEL0); |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 68 | } |
| 69 | |
| 70 | /* returns 0 if idx's corresponding MSR exists; otherwise returns 1. */ |
| 71 | static int amd_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx) |
| 72 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 73 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
| 74 | |
| 75 | idx &= ~(3u << 30); |
| 76 | |
| 77 | return (idx >= pmu->nr_arch_gp_counters); |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 78 | } |
| 79 | |
| 80 | /* idx is the ECX register of RDPMC instruction */ |
| 81 | static struct kvm_pmc *amd_msr_idx_to_pmc(struct kvm_vcpu *vcpu, unsigned idx) |
| 82 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 83 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
| 84 | struct kvm_pmc *counters; |
| 85 | |
| 86 | idx &= ~(3u << 30); |
| 87 | if (idx >= pmu->nr_arch_gp_counters) |
| 88 | return NULL; |
| 89 | counters = pmu->gp_counters; |
| 90 | |
| 91 | return &counters[idx]; |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 92 | } |
| 93 | |
| 94 | static bool amd_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr) |
| 95 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 96 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
| 97 | int ret = false; |
| 98 | |
| 99 | ret = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0) || |
| 100 | get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0); |
| 101 | |
| 102 | return ret; |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 103 | } |
| 104 | |
| 105 | static int amd_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data) |
| 106 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 107 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
| 108 | struct kvm_pmc *pmc; |
| 109 | |
| 110 | /* MSR_K7_PERFCTRn */ |
| 111 | pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0); |
| 112 | if (pmc) { |
| 113 | *data = pmc_read_counter(pmc); |
| 114 | return 0; |
| 115 | } |
| 116 | /* MSR_K7_EVNTSELn */ |
| 117 | pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0); |
| 118 | if (pmc) { |
| 119 | *data = pmc->eventsel; |
| 120 | return 0; |
| 121 | } |
| 122 | |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 123 | return 1; |
| 124 | } |
| 125 | |
| 126 | static int amd_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info) |
| 127 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 128 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
| 129 | struct kvm_pmc *pmc; |
| 130 | u32 msr = msr_info->index; |
| 131 | u64 data = msr_info->data; |
| 132 | |
| 133 | /* MSR_K7_PERFCTRn */ |
| 134 | pmc = get_gp_pmc(pmu, msr, MSR_K7_PERFCTR0); |
| 135 | if (pmc) { |
| 136 | if (!msr_info->host_initiated) |
| 137 | data = (s64)data; |
| 138 | pmc->counter += data - pmc_read_counter(pmc); |
| 139 | return 0; |
| 140 | } |
| 141 | /* MSR_K7_EVNTSELn */ |
| 142 | pmc = get_gp_pmc(pmu, msr, MSR_K7_EVNTSEL0); |
| 143 | if (pmc) { |
| 144 | if (data == pmc->eventsel) |
| 145 | return 0; |
| 146 | if (!(data & pmu->reserved_bits)) { |
| 147 | reprogram_gp_counter(pmc, data); |
| 148 | return 0; |
| 149 | } |
| 150 | } |
| 151 | |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 152 | return 1; |
| 153 | } |
| 154 | |
| 155 | static void amd_pmu_refresh(struct kvm_vcpu *vcpu) |
| 156 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 157 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
| 158 | |
| 159 | pmu->nr_arch_gp_counters = AMD64_NUM_COUNTERS; |
| 160 | pmu->counter_bitmask[KVM_PMC_GP] = ((u64)1 << 48) - 1; |
| 161 | pmu->reserved_bits = 0xffffffff00200000ull; |
| 162 | /* not applicable to AMD; but clean them to prevent any fall out */ |
| 163 | pmu->counter_bitmask[KVM_PMC_FIXED] = 0; |
| 164 | pmu->nr_arch_fixed_counters = 0; |
| 165 | pmu->version = 0; |
| 166 | pmu->global_status = 0; |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 167 | } |
| 168 | |
| 169 | static void amd_pmu_init(struct kvm_vcpu *vcpu) |
| 170 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 171 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
| 172 | int i; |
| 173 | |
| 174 | for (i = 0; i < AMD64_NUM_COUNTERS ; i++) { |
| 175 | pmu->gp_counters[i].type = KVM_PMC_GP; |
| 176 | pmu->gp_counters[i].vcpu = vcpu; |
| 177 | pmu->gp_counters[i].idx = i; |
| 178 | } |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 179 | } |
| 180 | |
| 181 | static void amd_pmu_reset(struct kvm_vcpu *vcpu) |
| 182 | { |
Wei Huang | ca72430 | 2015-06-12 01:34:55 -0400 | [diff] [blame] | 183 | struct kvm_pmu *pmu = vcpu_to_pmu(vcpu); |
| 184 | int i; |
| 185 | |
| 186 | for (i = 0; i < AMD64_NUM_COUNTERS; i++) { |
| 187 | struct kvm_pmc *pmc = &pmu->gp_counters[i]; |
| 188 | |
| 189 | pmc_stop_counter(pmc); |
| 190 | pmc->counter = pmc->eventsel = 0; |
| 191 | } |
Wei Huang | 25462f7 | 2015-06-19 15:45:05 +0200 | [diff] [blame] | 192 | } |
| 193 | |
/*
 * AMD vPMU ops table, plugged into the generic KVM PMU layer (pmu.c).
 * Each callback is the AMD-specific implementation of the corresponding
 * kvm_pmu_ops hook; see the function definitions above for semantics.
 */
struct kvm_pmu_ops amd_pmu_ops = {
	.find_arch_event = amd_find_arch_event,
	.find_fixed_event = amd_find_fixed_event,
	.pmc_is_enabled = amd_pmc_is_enabled,
	.pmc_idx_to_pmc = amd_pmc_idx_to_pmc,
	.msr_idx_to_pmc = amd_msr_idx_to_pmc,
	.is_valid_msr_idx = amd_is_valid_msr_idx,
	.is_valid_msr = amd_is_valid_msr,
	.get_msr = amd_pmu_get_msr,
	.set_msr = amd_pmu_set_msr,
	.refresh = amd_pmu_refresh,
	.init = amd_pmu_init,
	.reset = amd_pmu_reset,
};