#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/swait.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16-31 of kvm_memory_region::flags are used internally by KVM;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero:
 * bits 52-62 are masked in to indicate an error pfn,
 * and bit 63 is masked in to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but the host failed to
 * translate it to a pfn.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn can not be translated to a pfn:
 * either the gfn is not in any slot, or translating it to a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

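/*
 * Illustrative sketch (not part of this header): the two classes are
 * disjoint -- error pfns set bits in 52-62, the noslot pfn sets only
 * bit 63 -- and is_error_noslot_pfn() tests their union.  A caller
 * would typically do:
 *
 *	kvm_pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return handle_mmio();	// no memslot backs this gfn
 *	else if (is_error_pfn(pfn))
 *		return handle_fault();	// in a slot, translation failed
 *	// otherwise pfn is a valid host page frame number
 *
 * handle_mmio()/handle_fault() are hypothetical helpers, named here
 * only for illustration.
 */
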
/*
 * Architectures where KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * Architecture-independent vcpu->requests bit members.
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	0
#define KVM_REQ_MMU_RELOAD	1
#define KVM_REQ_PENDING_TIMER	2
#define KVM_REQ_UNHALT		3

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

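/*
 * Worked example (illustrative): an 8-byte MMIO access at a gpa whose
 * page offset is 0xffc crosses a page boundary, so it is split into the
 * two fragments allowed by KVM_MAX_MMIO_FRAGMENTS:
 *
 *	{ .gpa = gpa,     .data = buf,     .len = 4 }	// tail of page N
 *	{ .gpa = gpa + 4, .data = buf + 4, .len = 4 }	// head of page N+1
 *
 * each completed by a separate exit to userspace.
 */
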
struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	unsigned char fpu_counter;
	struct swait_queue_head wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU-relax-intercept / pause-loop-exit optimization:
	 * in_spin_loop: set when a vcpu takes a pause-loop exit or has a
	 * cpu-relax instruction intercepted.
	 * dy_eligible: indicates whether the vcpu is eligible for directed
	 * yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support very long bitmaps;
 * this number must be chosen so that such limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

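/*
 * Example (illustrative): on a 64-bit host, a 100-page slot needs
 * ALIGN(100, 64) / 8 = 128 / 8 = 16 bytes of dirty bitmap; the bitmap
 * is always a whole number of unsigned longs, which is what the bitops
 * helpers operating on it expect.
 */
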
struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains the list of irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
	 * the caller has read kvm->online_vcpus before (as is the case
	 * for kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

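/*
 * Illustrative sketch (not part of this header): iterating over all
 * online vcpus, e.g. to kick each one out of guest mode:
 *
 *	int i;
 *	struct kvm_vcpu *vcpu;
 *
 *	kvm_for_each_vcpu(i, vcpu, kvm)
 *		kvm_vcpu_kick(vcpu);
 */
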
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages; \
	     memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	return rcu_dereference_check(kvm->memslots[as_id],
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

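/*
 * Illustrative sketch (not part of this header): since kvm->memslots is
 * RCU-managed, a lookup by slot id is done inside an SRCU read-side
 * critical section (or with kvm->slots_lock held):
 *
 *	int idx = srcu_read_lock(&kvm->srcu);
 *	struct kvm_memory_slot *slot = id_to_memslot(kvm_memslots(kvm), id);
 *	...
 *	srcu_read_unlock(&kvm->srcu, idx);
 */
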
/*
 * The KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

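/*
 * Illustrative mapping (an assumption about __kvm_set_memory_region's
 * behavior, not a contract): a request with mem->memory_size == 0 against
 * an existing slot is KVM_MR_DELETE; a nonzero size against an empty slot
 * is KVM_MR_CREATE; an existing slot with a changed base_gfn is
 * KVM_MR_MOVE; otherwise only the flags differ and it is KVM_MR_FLAGS_ONLY.
 */
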
int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

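/*
 * Illustrative sketch (not part of this header): the gfn_to_hva_cache
 * variants avoid re-translating the gfn on every access to a fixed
 * guest buffer; both calls return 0 on success and nonzero on failure:
 *
 *	struct gfn_to_hva_cache ghc;
 *	u64 val = 0;
 *
 *	if (kvm_gfn_to_hva_cache_init(kvm, &ghc, gpa, sizeof(val)))
 *		return -EFAULT;
 *	if (kvm_write_guest_cached(kvm, &ghc, &val, sizeof(val)))
 *		return -EFAULT;
 */
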
struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			      struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct swait_queue_head *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

Eric Auger | 01c94e6 | 2015-03-04 11:14:33 +0100 | [diff] [blame] | 804 | #ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED |
| 805 | /* |
| 806 | * returns true if the virtual interrupt controller is initialized and |
| 807 | * ready to accept virtual IRQ. On some architectures the virtual interrupt |
| 808 | * controller is dynamically instantiated and this is not always true. |
| 809 | */ |
| 810 | bool kvm_arch_intc_initialized(struct kvm *kvm); |
| 811 | #else |
| 812 | static inline bool kvm_arch_intc_initialized(struct kvm *kvm) |
| 813 | { |
| 814 | return true; |
| 815 | } |
| 816 | #endif |

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif

/* must be called with irqs disabled */
static inline void __kvm_guest_enter(void)
{
	guest_enter();
	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode. In fact, switching to guest
	 * mode is very similar to exiting to userspace from an RCU point
	 * of view. In addition, the CPU may stay in guest mode for quite
	 * a long time (up to one time slice). Let's treat guest mode as
	 * a quiescent state, just like we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}

/* must be called with irqs disabled */
static inline void __kvm_guest_exit(void)
{
	guest_exit();
}

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_enter();
	local_irq_restore(flags);
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_exit();
	local_irq_restore(flags);
}
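
/*
 * Usage sketch (illustrative, not a real caller): an arch vcpu run loop
 * brackets guest execution with these helpers; the __-prefixed variants
 * are for paths that already run with interrupts disabled:
 *
 *	local_irq_disable();
 *	__kvm_guest_enter();
 *	...enter and run the guest...
 *	__kvm_guest_exit();
 *	local_irq_enable();
 */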

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

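	/* Fast path: retry the slot returned last time, cached in lru_slot. */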
	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

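	/*
	 * Slow path: binary search. Slots are sorted by base_gfn in
	 * descending order, so the first slot with base_gfn <= gfn is
	 * the only possible match.
	 */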
	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}
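
/*
 * Worked example (illustrative numbers): for a slot with base_gfn 0x100,
 * npages 0x10 and userspace_addr 0x7f0000000000, gfn 0x104 maps to
 * hva 0x7f0000000000 + 4 * PAGE_SIZE.
 */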

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
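
/*
 * The conversions above are plain page-shift arithmetic: with the common
 * PAGE_SHIFT of 12 (4 KiB pages), gfn 0x100 corresponds to gpa 0x100000,
 * and gpa 0x100fff folds back to gfn 0x100 (the in-page offset is
 * discarded).
 */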

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	int offset;
	struct kvm *kvm;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq. This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
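
/*
 * Usage sketch (illustrative): page-fault handlers typically sample the
 * sequence count before translating the fault, then retry if a
 * concurrent invalidation may have raced with them:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	...translate gfn to pfn outside mmu_lock...
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto retry;
 *	...install the mapping...
 *	spin_unlock(&kvm->mmu_lock);
 */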

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 /* FIXME: we can have more than that... */
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_setup_empty_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	/*
	 * Ensure the rest of the request is published to kvm_check_request's
	 * caller. Paired with the smp_mb__after_atomic in kvm_check_request.
	 */
	smp_wmb();
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);

		/*
		 * Ensure the rest of the request is visible to
		 * kvm_check_request's caller. Paired with the smp_wmb in
		 * kvm_make_request.
		 */
		smp_mb__after_atomic();
		return true;
	} else {
		return false;
	}
}
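
/*
 * Usage sketch (illustrative request): the producer side raises a request
 * and kicks the vcpu; the vcpu run loop consumes it before entering the
 * guest:
 *
 *	kvm_make_request(KVM_REQ_UNHALT, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	if (kvm_check_request(KVM_REQ_UNHALT, vcpu))
 *		...handle the request...
 */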

extern bool kvm_rebooting;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
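
/*
 * Sketch of a minimal device implementation (hypothetical names): only
 * name, create and destroy are mandatory; the attr and ioctl hooks may
 * be left NULL:
 *
 *	static int foo_create(struct kvm_device *dev, u32 type)
 *	{
 *		dev->private = kzalloc(sizeof(struct foo), GFP_KERNEL);
 *		return dev->private ? 0 : -ENOMEM;
 *	}
 *
 *	static void foo_destroy(struct kvm_device *dev)
 *	{
 *		kfree(dev->private);
 *		kfree(dev);
 *	}
 *
 *	static struct kvm_device_ops foo_ops = {
 *		.name = "foo",
 *		.create = foo_create,
 *		.destroy = foo_destroy,
 *	};
 *
 * registered with kvm_register_device_ops(&foo_ops, ...).
 */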

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
	return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#endif