#ifndef ARCH_X86_KVM_X86_H
#define ARCH_X86_KVM_X86_H

#include <linux/kvm_host.h>
#include "kvm_cache_regs.h"

/* Drop any exception pending (re)injection into the guest. */
static inline void kvm_clear_exception_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.exception.pending = false;
}

/*
 * Queue an interrupt for injection into the guest; @soft marks a
 * software interrupt (raised by INTn rather than an external pin).
 */
static inline void kvm_queue_interrupt(struct kvm_vcpu *vcpu, u8 vector,
				       bool soft)
{
	vcpu->arch.interrupt.pending = true;
	vcpu->arch.interrupt.soft = soft;
	vcpu->arch.interrupt.nr = vector;
}

static inline void kvm_clear_interrupt_queue(struct kvm_vcpu *vcpu)
{
	vcpu->arch.interrupt.pending = false;
}

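/*
 * Illustrative use (a sketch, not a definitive call site): the arch
 * injection path typically queues an interrupt before guest entry and
 * clears the queue again if delivery is cancelled, e.g.
 *
 *	kvm_queue_interrupt(vcpu, vector, false);   (soft == false: hw IRQ)
 *	...
 *	kvm_clear_interrupt_queue(vcpu);
 */
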
/* True if an exception, interrupt or NMI still awaits (re)injection. */
static inline bool kvm_event_needs_reinjection(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.exception.pending || vcpu->arch.interrupt.pending ||
		vcpu->arch.nmi_injected;
}

/* #BP and #OF are raised by INT3/INTO, i.e. software exceptions. */
static inline bool kvm_exception_is_soft(unsigned int nr)
{
	return (nr == BP_VECTOR) || (nr == OF_VECTOR);
}

static inline bool is_protmode(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr0_bits(vcpu, X86_CR0_PE);
}

static inline int is_long_mode(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return vcpu->arch.efer & EFER_LMA;
#else
	return 0;
#endif
}

/* True while the nested guest's addresses are translated through L1. */
static inline bool mmu_is_nested(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.walk_mmu == &vcpu->arch.nested_mmu;
}

static inline int is_pae(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PAE);
}

static inline int is_pse(struct kvm_vcpu *vcpu)
{
	return kvm_read_cr4_bits(vcpu, X86_CR4_PSE);
}

static inline int is_paging(struct kvm_vcpu *vcpu)
{
	return likely(kvm_read_cr0_bits(vcpu, X86_CR0_PG));
}

/* Mask for a bit position within a single 32-bit word. */
static inline u32 bit(int bitno)
{
	return 1 << (bitno & 31);
}

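/*
 * Illustrative use (a sketch, assuming a CPUID-style caller): x86
 * feature numbers encode word * 32 + bit, so bit() keeps only the
 * position within one 32-bit feature word, e.g.
 *
 *	entry->ecx &= ~bit(X86_FEATURE_VMX);   (bit 5 of word 4)
 */
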
/* Remember the gva->gfn translation of the most recent MMIO access. */
static inline void vcpu_cache_mmio_info(struct kvm_vcpu *vcpu,
					gva_t gva, gfn_t gfn, unsigned access)
{
	vcpu->arch.mmio_gva = gva & PAGE_MASK;
	vcpu->arch.access = access;
	vcpu->arch.mmio_gfn = gfn;
}

/*
 * Clear the mmio cache info for the given gva; in particular, if gva
 * is ~0ul, clear all mmio cache info.
 */
static inline void vcpu_clear_mmio_info(struct kvm_vcpu *vcpu, gva_t gva)
{
	if (gva != (~0ul) && vcpu->arch.mmio_gva != (gva & PAGE_MASK))
		return;

	vcpu->arch.mmio_gva = 0;
}

static inline bool vcpu_match_mmio_gva(struct kvm_vcpu *vcpu, unsigned long gva)
{
	if (vcpu->arch.mmio_gva && vcpu->arch.mmio_gva == (gva & PAGE_MASK))
		return true;

	return false;
}

static inline bool vcpu_match_mmio_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	if (vcpu->arch.mmio_gfn && vcpu->arch.mmio_gfn == gpa >> PAGE_SHIFT)
		return true;

	return false;
}

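/*
 * Illustrative fast path (a sketch, assuming a caller resembling the
 * emulator's gva->gpa helper): on a cache hit the gpa is rebuilt from
 * the cached gfn without walking the guest page tables, e.g.
 *
 *	if (vcpu_match_mmio_gva(vcpu, gva))
 *		gpa = ((gpa_t)vcpu->arch.mmio_gfn << PAGE_SHIFT) |
 *		      (gva & ~PAGE_MASK);
 */
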
void kvm_before_handle_nmi(struct kvm_vcpu *vcpu);
void kvm_after_handle_nmi(struct kvm_vcpu *vcpu);
int kvm_inject_realmode_interrupt(struct kvm_vcpu *vcpu, int irq, int inc_eip);

void kvm_write_tsc(struct kvm_vcpu *vcpu, struct msr_data *msr);

int kvm_read_guest_virt(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

int kvm_write_guest_virt_system(struct x86_emulate_ctxt *ctxt,
	gva_t addr, void *val, unsigned int bytes,
	struct x86_exception *exception);

extern u64 host_xcr0;

extern struct static_key kvm_no_apic_vcpu;

#endif