/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __KVM_X86_PMU_H
#define __KVM_X86_PMU_H

#define vcpu_to_pmu(vcpu) (&(vcpu)->arch.pmu)
#define pmu_to_vcpu(pmu)  (container_of((pmu), struct kvm_vcpu, arch.pmu))
#define pmc_to_pmu(pmc)   (&(pmc)->vcpu->arch.pmu)

/* retrieve the 4 bits for EN and PMI out of IA32_FIXED_CTR_CTRL */
#define fixed_ctrl_field(ctrl_reg, idx) (((ctrl_reg) >> ((idx)*4)) & 0xf)
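
/*
 * Worked example (illustrative values): for ctrl_reg = 0xb0 and idx = 1,
 * fixed_ctrl_field() yields (0xb0 >> 4) & 0xf = 0xb, i.e. fixed counter 1
 * raises a PMI on overflow (bit 3) and counts in both ring 0 and ring 3
 * (EN field, bits 1:0, = 3).
 */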

#define VMWARE_BACKDOOR_PMC_HOST_TSC		0x10000
#define VMWARE_BACKDOOR_PMC_REAL_TIME		0x10001
#define VMWARE_BACKDOOR_PMC_APPARENT_TIME	0x10002
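
/*
 * Behavioural sketch: these pseudo-counter indices do not address real PMCs.
 * When the VMware backdoor is enabled, RDPMC with one of these values in ECX
 * is intercepted and returns the host TSC or a time value rather than the
 * contents of a hardware counter.
 */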

struct kvm_event_hw_type_mapping {
	u8 eventsel;
	u8 unit_mask;
	unsigned event_type;
};
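
/*
 * Example entry, modelled on the Intel event table in pmu_intel.c
 * (illustrative): the architectural "unhalted core cycles" event maps
 * to the generic perf event for CPU cycles:
 *
 *	{ 0x3C, 0x00, PERF_COUNT_HW_CPU_CYCLES },
 */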

struct kvm_pmu_ops {
	unsigned (*find_arch_event)(struct kvm_pmu *pmu, u8 event_select,
				    u8 unit_mask);
	unsigned (*find_fixed_event)(int idx);
	bool (*pmc_is_enabled)(struct kvm_pmc *pmc);
	struct kvm_pmc *(*pmc_idx_to_pmc)(struct kvm_pmu *pmu, int pmc_idx);
	struct kvm_pmc *(*msr_idx_to_pmc)(struct kvm_vcpu *vcpu, unsigned idx,
					  u64 *mask);
	int (*is_valid_msr_idx)(struct kvm_vcpu *vcpu, unsigned idx);
	bool (*is_valid_msr)(struct kvm_vcpu *vcpu, u32 msr);
	int (*get_msr)(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
	int (*set_msr)(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
	void (*refresh)(struct kvm_vcpu *vcpu);
	void (*init)(struct kvm_vcpu *vcpu);
	void (*reset)(struct kvm_vcpu *vcpu);
};
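
/*
 * Sketch of how a vendor module wires this up (callback names as in
 * pmu_intel.c; shown for illustration only, not a complete table):
 *
 *	struct kvm_pmu_ops intel_pmu_ops = {
 *		.find_arch_event = intel_find_arch_event,
 *		.pmc_is_enabled  = intel_pmc_is_enabled,
 *		.get_msr         = intel_pmu_get_msr,
 *		.set_msr         = intel_pmu_set_msr,
 *		.refresh         = intel_pmu_refresh,
 *	};
 *
 * kvm_x86_ops->pmu_ops then points at the Intel or AMD table at runtime.
 */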

static inline u64 pmc_bitmask(struct kvm_pmc *pmc)
{
	struct kvm_pmu *pmu = pmc_to_pmu(pmc);

	return pmu->counter_bitmask[pmc->type];
}
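
/*
 * Illustration: pmu->counter_bitmask[] is derived from the counter width
 * reported by CPUID, e.g. for 48-bit general purpose counters the mask is
 * (1ULL << 48) - 1, so counter values read or written through this header
 * wrap at bit 48.
 */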

static inline u64 pmc_read_counter(struct kvm_pmc *pmc)
{
	u64 counter, enabled, running;

	counter = pmc->counter;
	if (pmc->perf_event)
		counter += perf_event_read_value(pmc->perf_event,
						 &enabled, &running);
	/* FIXME: Scaling needed? */
	return counter & pmc_bitmask(pmc);
}

static inline void pmc_stop_counter(struct kvm_pmc *pmc)
{
	if (pmc->perf_event) {
		pmc->counter = pmc_read_counter(pmc);
		perf_event_release_kernel(pmc->perf_event);
		pmc->perf_event = NULL;
	}
}

static inline bool pmc_is_gp(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_GP;
}

static inline bool pmc_is_fixed(struct kvm_pmc *pmc)
{
	return pmc->type == KVM_PMC_FIXED;
}

static inline bool pmc_is_enabled(struct kvm_pmc *pmc)
{
	return kvm_x86_ops->pmu_ops->pmc_is_enabled(pmc);
}

/* returns general purpose PMC with the specified MSR. Note that it can be
 * used for both PERFCTRn and EVNTSELn; that is why it accepts base as a
 * parameter to tell them apart.
 */
static inline struct kvm_pmc *get_gp_pmc(struct kvm_pmu *pmu, u32 msr,
					 u32 base)
{
	if (msr >= base && msr < base + pmu->nr_arch_gp_counters)
		return &pmu->gp_counters[msr - base];

	return NULL;
}
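
/*
 * Illustrative usage, as in the vendor MSR handlers: the same helper
 * resolves both counter and event select MSRs, distinguished by base:
 *
 *	pmc = get_gp_pmc(pmu, msr, MSR_IA32_PERFCTR0);   (counter MSRs)
 *	pmc = get_gp_pmc(pmu, msr, MSR_P6_EVNTSEL0);     (event select MSRs)
 */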

/* returns fixed PMC with the specified MSR */
static inline struct kvm_pmc *get_fixed_pmc(struct kvm_pmu *pmu, u32 msr)
{
	int base = MSR_CORE_PERF_FIXED_CTR0;

	if (msr >= base && msr < base + pmu->nr_arch_fixed_counters)
		return &pmu->fixed_counters[msr - base];

	return NULL;
}

void reprogram_gp_counter(struct kvm_pmc *pmc, u64 eventsel);
void reprogram_fixed_counter(struct kvm_pmc *pmc, u8 ctrl, int fixed_idx);
void reprogram_counter(struct kvm_pmu *pmu, int pmc_idx);

void kvm_pmu_deliver_pmi(struct kvm_vcpu *vcpu);
void kvm_pmu_handle_event(struct kvm_vcpu *vcpu);
int kvm_pmu_rdpmc(struct kvm_vcpu *vcpu, unsigned pmc, u64 *data);
int kvm_pmu_is_valid_msr_idx(struct kvm_vcpu *vcpu, unsigned idx);
bool kvm_pmu_is_valid_msr(struct kvm_vcpu *vcpu, u32 msr);
int kvm_pmu_get_msr(struct kvm_vcpu *vcpu, u32 msr, u64 *data);
int kvm_pmu_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info);
void kvm_pmu_refresh(struct kvm_vcpu *vcpu);
void kvm_pmu_reset(struct kvm_vcpu *vcpu);
void kvm_pmu_init(struct kvm_vcpu *vcpu);
void kvm_pmu_destroy(struct kvm_vcpu *vcpu);

bool is_vmware_backdoor_pmc(u32 pmc_idx);

extern struct kvm_pmu_ops intel_pmu_ops;
extern struct kvm_pmu_ops amd_pmu_ops;
#endif /* __KVM_X86_PMU_H */