Avi Kivity6aa8b732006-12-10 02:21:36 -08001/*
2 * Kernel-based Virtual Machine driver for Linux
3 *
4 * This module enables machines with Intel VT-x extensions to run virtual
5 * machines without emulation or binary translation.
6 *
7 * Copyright (C) 2006 Qumranet, Inc.
8 *
9 * Authors:
10 * Avi Kivity <avi@qumranet.com>
11 * Yaniv Kamay <yaniv@qumranet.com>
12 *
13 * This work is licensed under the terms of the GNU GPL, version 2. See
14 * the COPYING file in the top-level directory.
15 *
16 */
17
Hollis Blancharde2174022007-12-03 15:30:24 -060018#include "iodev.h"
Avi Kivity6aa8b732006-12-10 02:21:36 -080019
Avi Kivityedf88412007-12-16 11:02:48 +020020#include <linux/kvm_host.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080021#include <linux/kvm.h>
22#include <linux/module.h>
23#include <linux/errno.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080024#include <linux/percpu.h>
25#include <linux/gfp.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080026#include <linux/mm.h>
27#include <linux/miscdevice.h>
28#include <linux/vmalloc.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080029#include <linux/reboot.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080030#include <linux/debugfs.h>
31#include <linux/highmem.h>
32#include <linux/file.h>
Avi Kivity59ae6c62007-02-12 00:54:48 -080033#include <linux/sysdev.h>
Avi Kivity774c47f2007-02-12 00:54:47 -080034#include <linux/cpu.h>
Alexey Dobriyane8edc6e2007-05-21 01:22:52 +040035#include <linux/sched.h>
Avi Kivityd9e368d2007-06-07 19:18:30 +030036#include <linux/cpumask.h>
37#include <linux/smp.h>
Avi Kivityd6d28162007-06-28 08:38:16 -040038#include <linux/anon_inodes.h>
Avi Kivity04d2cc72007-09-10 18:10:54 +030039#include <linux/profile.h>
Anthony Liguori7aa81cc2007-09-17 14:57:50 -050040#include <linux/kvm_para.h>
Izik Eidus6fc138d2007-10-09 19:20:39 +020041#include <linux/pagemap.h>
Anthony Liguori8d4e1282007-10-18 09:59:34 -050042#include <linux/mman.h>
Anthony Liguori35149e22008-04-02 14:46:56 -050043#include <linux/swap.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080044
Avi Kivitye4956062007-06-28 14:15:57 -040045#include <asm/processor.h>
Avi Kivitye4956062007-06-28 14:15:57 -040046#include <asm/io.h>
47#include <asm/uaccess.h>
Izik Eidus3e021bf2007-11-19 11:16:57 +020048#include <asm/pgtable.h>
Avi Kivity6aa8b732006-12-10 02:21:36 -080049
Laurent Vivier5f94c172008-05-30 16:05:54 +020050#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
51#include "coalesced_mmio.h"
52#endif
53
Xiantao Zhang8a98f662008-10-06 13:47:38 +080054#ifdef KVM_CAP_DEVICE_ASSIGNMENT
55#include <linux/pci.h>
56#include <linux/interrupt.h>
57#include "irq.h"
58#endif
59
Avi Kivity6aa8b732006-12-10 02:21:36 -080060MODULE_AUTHOR("Qumranet");
61MODULE_LICENSE("GPL");
62
Sheng Yang5319c662008-11-24 14:32:57 +080063static int msi2intx = 1;
64module_param(msi2intx, bool, 0);
65
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +080066DEFINE_SPINLOCK(kvm_lock);
67LIST_HEAD(vm_list);
Avi Kivity133de902007-02-12 00:54:44 -080068
Rusty Russell7f59f492008-12-07 21:25:45 +103069static cpumask_var_t cpus_hardware_enabled;
Avi Kivity1b6c0162007-05-24 13:03:52 +030070
Rusty Russellc16f8622007-07-30 21:12:19 +100071struct kmem_cache *kvm_vcpu_cache;
72EXPORT_SYMBOL_GPL(kvm_vcpu_cache);
Avi Kivity1165f5f2007-04-19 17:27:43 +030073
Avi Kivity15ad7142007-07-11 18:17:21 +030074static __read_mostly struct preempt_ops kvm_preempt_ops;
75
Hollis Blanchard76f7c872008-04-15 16:05:42 -050076struct dentry *kvm_debugfs_dir;
Avi Kivity6aa8b732006-12-10 02:21:36 -080077
Avi Kivitybccf2152007-02-21 18:04:26 +020078static long kvm_vcpu_ioctl(struct file *file, unsigned int ioctl,
79 unsigned long arg);
80
Hannes Edere8ba5d32008-11-28 17:02:42 +010081static bool kvm_rebooting;
Avi Kivity4ecac3f2008-05-13 13:23:38 +030082
Xiantao Zhang8a98f662008-10-06 13:47:38 +080083#ifdef KVM_CAP_DEVICE_ASSIGNMENT
84static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
85 int assigned_dev_id)
86{
87 struct list_head *ptr;
88 struct kvm_assigned_dev_kernel *match;
89
90 list_for_each(ptr, head) {
91 match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
92 if (match->assigned_dev_id == assigned_dev_id)
93 return match;
94 }
95 return NULL;
96}
97
Sheng Yang2350bd12009-02-25 17:22:27 +080098static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
99 *assigned_dev, int irq)
100{
101 int i, index;
102 struct msix_entry *host_msix_entries;
103
104 host_msix_entries = assigned_dev->host_msix_entries;
105
106 index = -1;
107 for (i = 0; i < assigned_dev->entries_nr; i++)
108 if (irq == host_msix_entries[i].vector) {
109 index = i;
110 break;
111 }
112 if (index < 0) {
113 printk(KERN_WARNING "Fail to find correlated MSI-X entry!\n");
114 return 0;
115 }
116
117 return index;
118}
119
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800120static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
121{
122 struct kvm_assigned_dev_kernel *assigned_dev;
Sheng Yang2350bd12009-02-25 17:22:27 +0800123 struct kvm *kvm;
124 int irq, i;
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800125
126 assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
127 interrupt_work);
Sheng Yang2350bd12009-02-25 17:22:27 +0800128 kvm = assigned_dev->kvm;
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800129
 130 /* The kvm->lock mutex is taken to safely inject the irq into the guest.
 131 * When the interrupt injection (or the ioapic code) switches to a
 132 * finer-grained lock, update this.
 133 */
Sheng Yang2350bd12009-02-25 17:22:27 +0800134 mutex_lock(&kvm->lock);
135 if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_MSIX) {
136 struct kvm_guest_msix_entry *guest_entries =
137 assigned_dev->guest_msix_entries;
138 for (i = 0; i < assigned_dev->entries_nr; i++) {
139 if (!(guest_entries[i].flags &
140 KVM_ASSIGNED_MSIX_PENDING))
141 continue;
142 guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
143 kvm_set_irq(assigned_dev->kvm,
144 assigned_dev->irq_source_id,
145 guest_entries[i].vector, 1);
146 irq = assigned_dev->host_msix_entries[i].vector;
147 if (irq != 0)
148 enable_irq(irq);
149 assigned_dev->host_irq_disabled = false;
150 }
151 } else {
152 kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
153 assigned_dev->guest_irq, 1);
154 if (assigned_dev->irq_requested_type &
155 KVM_ASSIGNED_DEV_GUEST_MSI) {
156 enable_irq(assigned_dev->host_irq);
157 assigned_dev->host_irq_disabled = false;
158 }
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800159 }
Sheng Yang2350bd12009-02-25 17:22:27 +0800160
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800161 mutex_unlock(&assigned_dev->kvm->lock);
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800162}
163
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800164static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
165{
166 struct kvm_assigned_dev_kernel *assigned_dev =
167 (struct kvm_assigned_dev_kernel *) dev_id;
168
Sheng Yang2350bd12009-02-25 17:22:27 +0800169 if (assigned_dev->irq_requested_type == KVM_ASSIGNED_DEV_MSIX) {
170 int index = find_index_from_host_irq(assigned_dev, irq);
171 if (index < 0)
172 return IRQ_HANDLED;
173 assigned_dev->guest_msix_entries[index].flags |=
174 KVM_ASSIGNED_MSIX_PENDING;
175 }
176
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800177 schedule_work(&assigned_dev->interrupt_work);
Mark McLoughlindefaf152008-12-02 12:16:33 +0000178
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800179 disable_irq_nosync(irq);
Mark McLoughlindefaf152008-12-02 12:16:33 +0000180 assigned_dev->host_irq_disabled = true;
181
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800182 return IRQ_HANDLED;
183}
184
185/* Ack the irq line for an assigned device */
186static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
187{
188 struct kvm_assigned_dev_kernel *dev;
189
190 if (kian->gsi == -1)
191 return;
192
193 dev = container_of(kian, struct kvm_assigned_dev_kernel,
194 ack_notifier);
Mark McLoughlindefaf152008-12-02 12:16:33 +0000195
Sheng Yang5550af42008-10-15 20:15:06 +0800196 kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);
Mark McLoughlindefaf152008-12-02 12:16:33 +0000197
198 /* The guest irq may be shared so this ack may be
199 * from another device.
200 */
201 if (dev->host_irq_disabled) {
202 enable_irq(dev->host_irq);
203 dev->host_irq_disabled = false;
204 }
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800205}
206
Sheng Yangba4cef32009-01-06 10:03:03 +0800207/* The function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
Mark McLoughlin4a643be2008-12-01 13:57:49 +0000208static void kvm_free_assigned_irq(struct kvm *kvm,
209 struct kvm_assigned_dev_kernel *assigned_dev)
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800210{
Mark McLoughlin4a643be2008-12-01 13:57:49 +0000211 if (!irqchip_in_kernel(kvm))
212 return;
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800213
Sheng Yange19e30e2008-10-20 16:07:10 +0800214 kvm_unregister_irq_ack_notifier(&assigned_dev->ack_notifier);
Mark McLoughlinf29b2672008-12-01 13:57:47 +0000215
216 if (assigned_dev->irq_source_id != -1)
217 kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
218 assigned_dev->irq_source_id = -1;
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800219
Mark McLoughlin4a643be2008-12-01 13:57:49 +0000220 if (!assigned_dev->irq_requested_type)
221 return;
222
Sheng Yangba4cef32009-01-06 10:03:03 +0800223 /*
 224 * In kvm_free_device_irq, cancel_work_sync() returns true if:
 225 * 1. the work was scheduled, and then cancelled.
 226 * 2. the work callback was executed.
 227 *
 228 * The first case ensures that the irq is disabled and no more events
 229 * will arrive. But in the second case, the irq may have been enabled
 230 * again (e.g. for MSI), so we disable the irq here to prevent further events.
 231 *
 232 * Note this may result in a nested disable if the interrupt type is
 233 * INTx, but that is OK since we are going to free it.
 234 *
 235 * If this function is part of VM destroy, please ensure that the kvm
 236 * state is still valid up to this point, since we may also have to wait
 237 * for interrupt_work to finish.
 238 */
239 disable_irq_nosync(assigned_dev->host_irq);
240 cancel_work_sync(&assigned_dev->interrupt_work);
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800241
Mark McLoughlin4a643be2008-12-01 13:57:49 +0000242 free_irq(assigned_dev->host_irq, (void *)assigned_dev);
243
244 if (assigned_dev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
245 pci_disable_msi(assigned_dev->dev);
246
247 assigned_dev->irq_requested_type = 0;
248}
249
250
251static void kvm_free_assigned_device(struct kvm *kvm,
252 struct kvm_assigned_dev_kernel
253 *assigned_dev)
254{
255 kvm_free_assigned_irq(kvm, assigned_dev);
256
Sheng Yang6eb55812008-10-31 12:37:41 +0800257 pci_reset_function(assigned_dev->dev);
258
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800259 pci_release_regions(assigned_dev->dev);
260 pci_disable_device(assigned_dev->dev);
261 pci_dev_put(assigned_dev->dev);
262
263 list_del(&assigned_dev->list);
264 kfree(assigned_dev);
265}
266
267void kvm_free_all_assigned_devices(struct kvm *kvm)
268{
269 struct list_head *ptr, *ptr2;
270 struct kvm_assigned_dev_kernel *assigned_dev;
271
272 list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
273 assigned_dev = list_entry(ptr,
274 struct kvm_assigned_dev_kernel,
275 list);
276
277 kvm_free_assigned_device(kvm, assigned_dev);
278 }
279}
280
Sheng Yang00e3ed32008-11-24 14:32:50 +0800281static int assigned_device_update_intx(struct kvm *kvm,
282 struct kvm_assigned_dev_kernel *adev,
283 struct kvm_assigned_irq *airq)
284{
Sheng Yangfbac7812008-11-24 14:32:52 +0800285 adev->guest_irq = airq->guest_irq;
286 adev->ack_notifier.gsi = airq->guest_irq;
287
288 if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_INTX)
Sheng Yang00e3ed32008-11-24 14:32:50 +0800289 return 0;
Sheng Yang00e3ed32008-11-24 14:32:50 +0800290
291 if (irqchip_in_kernel(kvm)) {
Sheng Yang5319c662008-11-24 14:32:57 +0800292 if (!msi2intx &&
Sheng Yangd7cff1c2009-01-06 16:25:10 +0800293 (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)) {
294 free_irq(adev->host_irq, (void *)adev);
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800295 pci_disable_msi(adev->dev);
296 }
297
Sheng Yang00e3ed32008-11-24 14:32:50 +0800298 if (!capable(CAP_SYS_RAWIO))
299 return -EPERM;
300
301 if (airq->host_irq)
302 adev->host_irq = airq->host_irq;
303 else
304 adev->host_irq = adev->dev->irq;
Sheng Yang00e3ed32008-11-24 14:32:50 +0800305
306 /* Even though this is PCI, we don't want to use shared
307 * interrupts. Sharing host devices with guest-assigned devices
308 * on the same interrupt line is not a happy situation: there
309 * are going to be long delays in accepting, acking, etc.
310 */
311 if (request_irq(adev->host_irq, kvm_assigned_dev_intr,
312 0, "kvm_assigned_intx_device", (void *)adev))
313 return -EIO;
314 }
315
Sheng Yang4f906c12008-11-24 14:32:51 +0800316 adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_INTX |
317 KVM_ASSIGNED_DEV_HOST_INTX;
Sheng Yang00e3ed32008-11-24 14:32:50 +0800318 return 0;
319}
320
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800321#ifdef CONFIG_X86
322static int assigned_device_update_msi(struct kvm *kvm,
323 struct kvm_assigned_dev_kernel *adev,
324 struct kvm_assigned_irq *airq)
325{
326 int r;
327
Sheng Yang79950e12009-02-10 13:57:06 +0800328 adev->guest_irq = airq->guest_irq;
Sheng Yang5319c662008-11-24 14:32:57 +0800329 if (airq->flags & KVM_DEV_IRQ_ASSIGN_ENABLE_MSI) {
 330 /* x86 doesn't care about the upper address of the guest MSI message addr */
331 adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_MSI;
332 adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_INTX;
Sheng Yang5319c662008-11-24 14:32:57 +0800333 adev->ack_notifier.gsi = -1;
334 } else if (msi2intx) {
335 adev->irq_requested_type |= KVM_ASSIGNED_DEV_GUEST_INTX;
336 adev->irq_requested_type &= ~KVM_ASSIGNED_DEV_GUEST_MSI;
Sheng Yang5319c662008-11-24 14:32:57 +0800337 adev->ack_notifier.gsi = airq->guest_irq;
Sheng Yang17071fe2009-01-06 16:25:11 +0800338 } else {
339 /*
 340 * The guest requested to disable device MSI; we disable MSI and
 341 * re-enable INTx by default. Note this only applies to the
 342 * non-msi2intx case.
343 */
344 assigned_device_update_intx(kvm, adev, airq);
345 return 0;
Sheng Yang5319c662008-11-24 14:32:57 +0800346 }
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800347
348 if (adev->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI)
349 return 0;
350
351 if (irqchip_in_kernel(kvm)) {
Sheng Yang5319c662008-11-24 14:32:57 +0800352 if (!msi2intx) {
353 if (adev->irq_requested_type &
354 KVM_ASSIGNED_DEV_HOST_INTX)
355 free_irq(adev->host_irq, (void *)adev);
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800356
Sheng Yang5319c662008-11-24 14:32:57 +0800357 r = pci_enable_msi(adev->dev);
358 if (r)
359 return r;
360 }
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800361
362 adev->host_irq = adev->dev->irq;
363 if (request_irq(adev->host_irq, kvm_assigned_dev_intr, 0,
364 "kvm_assigned_msi_device", (void *)adev))
365 return -EIO;
366 }
367
Sheng Yang5319c662008-11-24 14:32:57 +0800368 if (!msi2intx)
369 adev->irq_requested_type = KVM_ASSIGNED_DEV_GUEST_MSI;
370
371 adev->irq_requested_type |= KVM_ASSIGNED_DEV_HOST_MSI;
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800372 return 0;
373}
374#endif
375
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800376static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
377 struct kvm_assigned_irq
378 *assigned_irq)
379{
380 int r = 0;
381 struct kvm_assigned_dev_kernel *match;
Sheng Yang17071fe2009-01-06 16:25:11 +0800382 u32 current_flags = 0, changed_flags;
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800383
384 mutex_lock(&kvm->lock);
385
386 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
387 assigned_irq->assigned_dev_id);
388 if (!match) {
389 mutex_unlock(&kvm->lock);
390 return -EINVAL;
391 }
392
Sheng Yang4f906c12008-11-24 14:32:51 +0800393 if (!match->irq_requested_type) {
Sheng Yang342ffb92008-11-24 14:32:49 +0800394 INIT_WORK(&match->interrupt_work,
395 kvm_assigned_dev_interrupt_work_handler);
396 if (irqchip_in_kernel(kvm)) {
 397 /* Register ack notifier */
398 match->ack_notifier.gsi = -1;
399 match->ack_notifier.irq_acked =
400 kvm_assigned_dev_ack_irq;
401 kvm_register_irq_ack_notifier(kvm,
402 &match->ack_notifier);
403
404 /* Request IRQ source ID */
405 r = kvm_request_irq_source_id(kvm);
406 if (r < 0)
407 goto out_release;
408 else
409 match->irq_source_id = r;
Sheng Yang5319c662008-11-24 14:32:57 +0800410
411#ifdef CONFIG_X86
412 /* Determine host device irq type, we can know the
413 * result from dev->msi_enabled */
414 if (msi2intx)
415 pci_enable_msi(match->dev);
416#endif
Sheng Yang342ffb92008-11-24 14:32:49 +0800417 }
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800418 }
419
Sheng Yang17071fe2009-01-06 16:25:11 +0800420 if ((match->irq_requested_type & KVM_ASSIGNED_DEV_HOST_MSI) &&
421 (match->irq_requested_type & KVM_ASSIGNED_DEV_GUEST_MSI))
422 current_flags |= KVM_DEV_IRQ_ASSIGN_ENABLE_MSI;
423
424 changed_flags = assigned_irq->flags ^ current_flags;
425
426 if ((changed_flags & KVM_DEV_IRQ_ASSIGN_MSI_ACTION) ||
Sheng Yang5319c662008-11-24 14:32:57 +0800427 (msi2intx && match->dev->msi_enabled)) {
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800428#ifdef CONFIG_X86
429 r = assigned_device_update_msi(kvm, match, assigned_irq);
430 if (r) {
431 printk(KERN_WARNING "kvm: failed to enable "
432 "MSI device!\n");
433 goto out_release;
434 }
435#else
436 r = -ENOTTY;
437#endif
438 } else if (assigned_irq->host_irq == 0 && match->dev->irq == 0) {
 439 /* Host device IRQ 0 means INTx is not supported */
Sheng Yang5319c662008-11-24 14:32:57 +0800440 if (!msi2intx) {
441 printk(KERN_WARNING
442 "kvm: wait device to enable MSI!\n");
443 r = 0;
444 } else {
445 printk(KERN_WARNING
446 "kvm: failed to enable MSI device!\n");
447 r = -ENOTTY;
448 goto out_release;
449 }
Sheng Yang6b9cc7f2008-11-24 14:32:56 +0800450 } else {
451 /* Non-sharing INTx mode */
452 r = assigned_device_update_intx(kvm, match, assigned_irq);
453 if (r) {
454 printk(KERN_WARNING "kvm: failed to enable "
455 "INTx device!\n");
456 goto out_release;
457 }
458 }
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800459
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800460 mutex_unlock(&kvm->lock);
461 return r;
462out_release:
463 mutex_unlock(&kvm->lock);
464 kvm_free_assigned_device(kvm, match);
465 return r;
466}
467
468static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
469 struct kvm_assigned_pci_dev *assigned_dev)
470{
471 int r = 0;
472 struct kvm_assigned_dev_kernel *match;
473 struct pci_dev *dev;
474
Mark McLoughlin682edb42009-02-05 18:23:46 +0000475 down_read(&kvm->slots_lock);
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800476 mutex_lock(&kvm->lock);
477
478 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
479 assigned_dev->assigned_dev_id);
480 if (match) {
481 /* device already assigned */
482 r = -EINVAL;
483 goto out;
484 }
485
486 match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
487 if (match == NULL) {
488 printk(KERN_INFO "%s: Couldn't allocate memory\n",
489 __func__);
490 r = -ENOMEM;
491 goto out;
492 }
493 dev = pci_get_bus_and_slot(assigned_dev->busnr,
494 assigned_dev->devfn);
495 if (!dev) {
496 printk(KERN_INFO "%s: host device not found\n", __func__);
497 r = -EINVAL;
498 goto out_free;
499 }
500 if (pci_enable_device(dev)) {
501 printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
502 r = -EBUSY;
503 goto out_put;
504 }
505 r = pci_request_regions(dev, "kvm_assigned_device");
506 if (r) {
507 printk(KERN_INFO "%s: Could not get access to device regions\n",
508 __func__);
509 goto out_disable;
510 }
Sheng Yang6eb55812008-10-31 12:37:41 +0800511
512 pci_reset_function(dev);
513
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800514 match->assigned_dev_id = assigned_dev->assigned_dev_id;
515 match->host_busnr = assigned_dev->busnr;
516 match->host_devfn = assigned_dev->devfn;
Weidong Hanb6535742008-12-08 23:29:53 +0800517 match->flags = assigned_dev->flags;
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800518 match->dev = dev;
Mark McLoughlinf29b2672008-12-01 13:57:47 +0000519 match->irq_source_id = -1;
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800520 match->kvm = kvm;
521
522 list_add(&match->list, &kvm->arch.assigned_dev_head);
523
524 if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
Joerg Roedel19de40a2008-12-03 14:43:34 +0100525 if (!kvm->arch.iommu_domain) {
Weidong Han260782b2008-12-02 21:03:39 +0800526 r = kvm_iommu_map_guest(kvm);
527 if (r)
528 goto out_list_del;
529 }
530 r = kvm_assign_device(kvm, match);
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800531 if (r)
532 goto out_list_del;
533 }
534
535out:
536 mutex_unlock(&kvm->lock);
Mark McLoughlin682edb42009-02-05 18:23:46 +0000537 up_read(&kvm->slots_lock);
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800538 return r;
539out_list_del:
540 list_del(&match->list);
541 pci_release_regions(dev);
542out_disable:
543 pci_disable_device(dev);
544out_put:
545 pci_dev_put(dev);
546out_free:
547 kfree(match);
548 mutex_unlock(&kvm->lock);
Mark McLoughlin682edb42009-02-05 18:23:46 +0000549 up_read(&kvm->slots_lock);
Xiantao Zhang8a98f662008-10-06 13:47:38 +0800550 return r;
551}
552#endif
553
Weidong Han0a920352008-12-02 21:24:23 +0800554#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
555static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
556 struct kvm_assigned_pci_dev *assigned_dev)
557{
558 int r = 0;
559 struct kvm_assigned_dev_kernel *match;
560
561 mutex_lock(&kvm->lock);
562
563 match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
564 assigned_dev->assigned_dev_id);
565 if (!match) {
566 printk(KERN_INFO "%s: device hasn't been assigned before, "
567 "so cannot be deassigned\n", __func__);
568 r = -EINVAL;
569 goto out;
570 }
571
Weidong Han4a906e42009-02-13 17:27:51 +0800572 if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
Weidong Han0a920352008-12-02 21:24:23 +0800573 kvm_deassign_device(kvm, match);
574
575 kvm_free_assigned_device(kvm, match);
576
577out:
578 mutex_unlock(&kvm->lock);
579 return r;
580}
581#endif
582
James Morris5aacf0c2006-12-22 01:04:55 -0800583static inline int valid_vcpu(int n)
584{
585 return likely(n >= 0 && n < KVM_MAX_VCPUS);
586}
587
Xiantao Zhangc77fb9d2008-09-27 10:55:40 +0800588inline int kvm_is_mmio_pfn(pfn_t pfn)
Ben-Ami Yassourcbff90a2008-07-28 19:26:24 +0300589{
Joerg Roedelfc5659c2009-02-18 14:08:58 +0100590 if (pfn_valid(pfn)) {
591 struct page *page = compound_head(pfn_to_page(pfn));
592 return PageReserved(page);
593 }
Ben-Ami Yassourcbff90a2008-07-28 19:26:24 +0300594
595 return true;
596}
597
Avi Kivity6aa8b732006-12-10 02:21:36 -0800598/*
 599 * Switches to the specified vcpu, until a matching vcpu_put()
600 */
Carsten Otte313a3dc2007-10-11 19:16:52 +0200601void vcpu_load(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800602{
Avi Kivity15ad7142007-07-11 18:17:21 +0300603 int cpu;
604
Avi Kivitybccf2152007-02-21 18:04:26 +0200605 mutex_lock(&vcpu->mutex);
Avi Kivity15ad7142007-07-11 18:17:21 +0300606 cpu = get_cpu();
607 preempt_notifier_register(&vcpu->preempt_notifier);
Carsten Otte313a3dc2007-10-11 19:16:52 +0200608 kvm_arch_vcpu_load(vcpu, cpu);
Avi Kivity15ad7142007-07-11 18:17:21 +0300609 put_cpu();
Avi Kivitybccf2152007-02-21 18:04:26 +0200610}
611
Carsten Otte313a3dc2007-10-11 19:16:52 +0200612void vcpu_put(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800613{
Avi Kivity15ad7142007-07-11 18:17:21 +0300614 preempt_disable();
Carsten Otte313a3dc2007-10-11 19:16:52 +0200615 kvm_arch_vcpu_put(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +0300616 preempt_notifier_unregister(&vcpu->preempt_notifier);
617 preempt_enable();
Avi Kivity6aa8b732006-12-10 02:21:36 -0800618 mutex_unlock(&vcpu->mutex);
619}
620
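/*
 * Illustrative usage sketch (not from the original file): callers bracket
 * per-vcpu work with vcpu_load()/vcpu_put() so the vcpu state is loaded on
 * the current cpu for the duration of the operation, e.g.
 *
 *	vcpu_load(vcpu);
 *	r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
 *	vcpu_put(vcpu);
 *
 * kvm_arch_vcpu_ioctl_get_regs() stands in for any per-vcpu operation; the
 * specific helper and its arguments are assumptions for the example.
 */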
Avi Kivityd9e368d2007-06-07 19:18:30 +0300621static void ack_flush(void *_completed)
622{
Avi Kivityd9e368d2007-06-07 19:18:30 +0300623}
624
Rusty Russell49846892008-12-08 20:26:24 +1030625static bool make_all_cpus_request(struct kvm *kvm, unsigned int req)
Avi Kivityd9e368d2007-06-07 19:18:30 +0300626{
Avi Kivity597a5f52008-07-20 14:24:22 +0300627 int i, cpu, me;
Rusty Russell6ef7a1b2008-12-08 20:28:04 +1030628 cpumask_var_t cpus;
629 bool called = true;
Avi Kivityd9e368d2007-06-07 19:18:30 +0300630 struct kvm_vcpu *vcpu;
Avi Kivityd9e368d2007-06-07 19:18:30 +0300631
Rusty Russell6ef7a1b2008-12-08 20:28:04 +1030632 if (alloc_cpumask_var(&cpus, GFP_ATOMIC))
633 cpumask_clear(cpus);
634
Avi Kivity597a5f52008-07-20 14:24:22 +0300635 me = get_cpu();
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000636 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
637 vcpu = kvm->vcpus[i];
638 if (!vcpu)
639 continue;
Rusty Russell49846892008-12-08 20:26:24 +1030640 if (test_and_set_bit(req, &vcpu->requests))
Avi Kivityd9e368d2007-06-07 19:18:30 +0300641 continue;
642 cpu = vcpu->cpu;
Rusty Russell6ef7a1b2008-12-08 20:28:04 +1030643 if (cpus != NULL && cpu != -1 && cpu != me)
644 cpumask_set_cpu(cpu, cpus);
Avi Kivityd9e368d2007-06-07 19:18:30 +0300645 }
Rusty Russell6ef7a1b2008-12-08 20:28:04 +1030646 if (unlikely(cpus == NULL))
647 smp_call_function_many(cpu_online_mask, ack_flush, NULL, 1);
648 else if (!cpumask_empty(cpus))
649 smp_call_function_many(cpus, ack_flush, NULL, 1);
650 else
651 called = false;
Avi Kivity597a5f52008-07-20 14:24:22 +0300652 put_cpu();
Rusty Russell6ef7a1b2008-12-08 20:28:04 +1030653 free_cpumask_var(cpus);
Rusty Russell49846892008-12-08 20:26:24 +1030654 return called;
655}
656
657void kvm_flush_remote_tlbs(struct kvm *kvm)
658{
659 if (make_all_cpus_request(kvm, KVM_REQ_TLB_FLUSH))
660 ++kvm->stat.remote_tlb_flush;
Avi Kivityd9e368d2007-06-07 19:18:30 +0300661}
662
Marcelo Tosatti2e53d632008-02-20 14:47:24 -0500663void kvm_reload_remote_mmus(struct kvm *kvm)
664{
Rusty Russell49846892008-12-08 20:26:24 +1030665 make_all_cpus_request(kvm, KVM_REQ_MMU_RELOAD);
Marcelo Tosatti2e53d632008-02-20 14:47:24 -0500666}
667
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000668int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id)
669{
670 struct page *page;
671 int r;
672
673 mutex_init(&vcpu->mutex);
674 vcpu->cpu = -1;
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000675 vcpu->kvm = kvm;
676 vcpu->vcpu_id = id;
Eddie Dongb6958ce2007-07-18 12:15:21 +0300677 init_waitqueue_head(&vcpu->wq);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000678
679 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
680 if (!page) {
681 r = -ENOMEM;
682 goto fail;
683 }
684 vcpu->run = page_address(page);
685
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +0800686 r = kvm_arch_vcpu_init(vcpu);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000687 if (r < 0)
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +0800688 goto fail_free_run;
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000689 return 0;
690
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000691fail_free_run:
692 free_page((unsigned long)vcpu->run);
693fail:
Rusty Russell76fafa52007-10-08 10:50:48 +1000694 return r;
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000695}
696EXPORT_SYMBOL_GPL(kvm_vcpu_init);
697
698void kvm_vcpu_uninit(struct kvm_vcpu *vcpu)
699{
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +0800700 kvm_arch_vcpu_uninit(vcpu);
Rusty Russellfb3f0f52007-07-27 17:16:56 +1000701 free_page((unsigned long)vcpu->run);
702}
703EXPORT_SYMBOL_GPL(kvm_vcpu_uninit);
704
Andrea Arcangelie930bff2008-07-25 16:24:52 +0200705#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
706static inline struct kvm *mmu_notifier_to_kvm(struct mmu_notifier *mn)
707{
708 return container_of(mn, struct kvm, mmu_notifier);
709}
710
711static void kvm_mmu_notifier_invalidate_page(struct mmu_notifier *mn,
712 struct mm_struct *mm,
713 unsigned long address)
714{
715 struct kvm *kvm = mmu_notifier_to_kvm(mn);
716 int need_tlb_flush;
717
718 /*
719 * When ->invalidate_page runs, the linux pte has been zapped
720 * already but the page is still allocated until
721 * ->invalidate_page returns. So if we increase the sequence
722 * here the kvm page fault will notice if the spte can't be
723 * established because the page is going to be freed. If
724 * instead the kvm page fault establishes the spte before
725 * ->invalidate_page runs, kvm_unmap_hva will release it
726 * before returning.
727 *
 728 * The sequence increase only needs to be seen at spin_unlock
729 * time, and not at spin_lock time.
730 *
731 * Increasing the sequence after the spin_unlock would be
732 * unsafe because the kvm page fault could then establish the
733 * pte after kvm_unmap_hva returned, without noticing the page
734 * is going to be freed.
735 */
736 spin_lock(&kvm->mmu_lock);
737 kvm->mmu_notifier_seq++;
738 need_tlb_flush = kvm_unmap_hva(kvm, address);
739 spin_unlock(&kvm->mmu_lock);
740
 741 /* we have to flush the tlb before the pages can be freed */
742 if (need_tlb_flush)
743 kvm_flush_remote_tlbs(kvm);
744
745}
746
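/*
 * Illustrative sketch of the consumer side (assumed to live in the arch
 * page fault code): the fault handler samples mmu_notifier_seq before
 * resolving the pfn and rechecks it under mmu_lock, retrying the fault if
 * an invalidation ran in between:
 *
 *	mmu_seq = vcpu->kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(vcpu->kvm, gfn);
 *	spin_lock(&vcpu->kvm->mmu_lock);
 *	if (mmu_notifier_retry(vcpu, mmu_seq))
 *		goto out_unlock;	(drop the pfn and let the fault retry)
 *
 * mmu_notifier_retry() is assumed to compare the sampled sequence and the
 * in-progress invalidation count against the current values.
 */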
747static void kvm_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn,
748 struct mm_struct *mm,
749 unsigned long start,
750 unsigned long end)
751{
752 struct kvm *kvm = mmu_notifier_to_kvm(mn);
753 int need_tlb_flush = 0;
754
755 spin_lock(&kvm->mmu_lock);
756 /*
757 * The count increase must become visible at unlock time as no
758 * spte can be established without taking the mmu_lock and
759 * count is also read inside the mmu_lock critical section.
760 */
761 kvm->mmu_notifier_count++;
762 for (; start < end; start += PAGE_SIZE)
763 need_tlb_flush |= kvm_unmap_hva(kvm, start);
764 spin_unlock(&kvm->mmu_lock);
765
 766 /* we have to flush the tlb before the pages can be freed */
767 if (need_tlb_flush)
768 kvm_flush_remote_tlbs(kvm);
769}
770
771static void kvm_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn,
772 struct mm_struct *mm,
773 unsigned long start,
774 unsigned long end)
775{
776 struct kvm *kvm = mmu_notifier_to_kvm(mn);
777
778 spin_lock(&kvm->mmu_lock);
779 /*
780 * This sequence increase will notify the kvm page fault that
781 * the page that is going to be mapped in the spte could have
782 * been freed.
783 */
784 kvm->mmu_notifier_seq++;
785 /*
786 * The above sequence increase must be visible before the
787 * below count decrease but both values are read by the kvm
788 * page fault under mmu_lock spinlock so we don't need to add
 789 * a smp_wmb() here in between the two.
790 */
791 kvm->mmu_notifier_count--;
792 spin_unlock(&kvm->mmu_lock);
793
794 BUG_ON(kvm->mmu_notifier_count < 0);
795}
796
797static int kvm_mmu_notifier_clear_flush_young(struct mmu_notifier *mn,
798 struct mm_struct *mm,
799 unsigned long address)
800{
801 struct kvm *kvm = mmu_notifier_to_kvm(mn);
802 int young;
803
804 spin_lock(&kvm->mmu_lock);
805 young = kvm_age_hva(kvm, address);
806 spin_unlock(&kvm->mmu_lock);
807
808 if (young)
809 kvm_flush_remote_tlbs(kvm);
810
811 return young;
812}
813
Marcelo Tosatti85db06e2008-12-10 21:23:26 +0100814static void kvm_mmu_notifier_release(struct mmu_notifier *mn,
815 struct mm_struct *mm)
816{
817 struct kvm *kvm = mmu_notifier_to_kvm(mn);
818 kvm_arch_flush_shadow(kvm);
819}
820
Andrea Arcangelie930bff2008-07-25 16:24:52 +0200821static const struct mmu_notifier_ops kvm_mmu_notifier_ops = {
822 .invalidate_page = kvm_mmu_notifier_invalidate_page,
823 .invalidate_range_start = kvm_mmu_notifier_invalidate_range_start,
824 .invalidate_range_end = kvm_mmu_notifier_invalidate_range_end,
825 .clear_flush_young = kvm_mmu_notifier_clear_flush_young,
Marcelo Tosatti85db06e2008-12-10 21:23:26 +0100826 .release = kvm_mmu_notifier_release,
Andrea Arcangelie930bff2008-07-25 16:24:52 +0200827};
828#endif /* CONFIG_MMU_NOTIFIER && KVM_ARCH_WANT_MMU_NOTIFIER */
829
Avi Kivityf17abe92007-02-21 19:28:04 +0200830static struct kvm *kvm_create_vm(void)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800831{
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +0800832 struct kvm *kvm = kvm_arch_create_vm();
Laurent Vivier5f94c172008-05-30 16:05:54 +0200833#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
834 struct page *page;
835#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -0800836
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +0800837 if (IS_ERR(kvm))
838 goto out;
Avi Kivity75858a82009-01-04 17:10:50 +0200839#ifdef CONFIG_HAVE_KVM_IRQCHIP
Avi Kivity399ec802008-11-19 13:58:46 +0200840 INIT_LIST_HEAD(&kvm->irq_routing);
Avi Kivity75858a82009-01-04 17:10:50 +0200841 INIT_HLIST_HEAD(&kvm->mask_notifier_list);
842#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -0800843
Laurent Vivier5f94c172008-05-30 16:05:54 +0200844#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
845 page = alloc_page(GFP_KERNEL | __GFP_ZERO);
846 if (!page) {
847 kfree(kvm);
848 return ERR_PTR(-ENOMEM);
849 }
850 kvm->coalesced_mmio_ring =
851 (struct kvm_coalesced_mmio_ring *)page_address(page);
852#endif
853
Andrea Arcangelie930bff2008-07-25 16:24:52 +0200854#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
855 {
856 int err;
857 kvm->mmu_notifier.ops = &kvm_mmu_notifier_ops;
858 err = mmu_notifier_register(&kvm->mmu_notifier, current->mm);
859 if (err) {
860#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
861 put_page(page);
862#endif
863 kfree(kvm);
864 return ERR_PTR(err);
865 }
866 }
867#endif
868
Avi Kivity6d4e4c42007-11-21 16:41:05 +0200869 kvm->mm = current->mm;
870 atomic_inc(&kvm->mm->mm_count);
Marcelo Tosattiaaee2c92007-12-20 19:18:26 -0500871 spin_lock_init(&kvm->mmu_lock);
Eddie Dong74906342007-06-19 18:05:03 +0300872 kvm_io_bus_init(&kvm->pio_bus);
Shaohua Li11ec2802007-07-23 14:51:37 +0800873 mutex_init(&kvm->lock);
Gregory Haskins2eeb2e92007-05-31 14:08:53 -0400874 kvm_io_bus_init(&kvm->mmio_bus);
Izik Eidus72dc67a2008-02-10 18:04:15 +0200875 init_rwsem(&kvm->slots_lock);
Izik Eidusd39f13b2008-03-30 16:01:25 +0300876 atomic_set(&kvm->users_count, 1);
Rusty Russell5e58cfe2007-07-23 17:08:21 +1000877 spin_lock(&kvm_lock);
878 list_add(&kvm->vm_list, &vm_list);
879 spin_unlock(&kvm_lock);
Laurent Vivier5f94c172008-05-30 16:05:54 +0200880#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
881 kvm_coalesced_mmio_init(kvm);
882#endif
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +0800883out:
Avi Kivityf17abe92007-02-21 19:28:04 +0200884 return kvm;
885}
886
Avi Kivity6aa8b732006-12-10 02:21:36 -0800887/*
888 * Free any memory in @free but not in @dont.
889 */
890static void kvm_free_physmem_slot(struct kvm_memory_slot *free,
891 struct kvm_memory_slot *dont)
892{
Izik Eidus290fc382007-09-27 14:11:22 +0200893 if (!dont || free->rmap != dont->rmap)
894 vfree(free->rmap);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800895
896 if (!dont || free->dirty_bitmap != dont->dirty_bitmap)
897 vfree(free->dirty_bitmap);
898
Marcelo Tosatti05da4552008-02-23 11:44:30 -0300899 if (!dont || free->lpage_info != dont->lpage_info)
900 vfree(free->lpage_info);
901
Avi Kivity6aa8b732006-12-10 02:21:36 -0800902 free->npages = 0;
Al Viro8b6d44c2007-02-09 16:38:40 +0000903 free->dirty_bitmap = NULL;
Anthony Liguori8d4e1282007-10-18 09:59:34 -0500904 free->rmap = NULL;
Marcelo Tosatti05da4552008-02-23 11:44:30 -0300905 free->lpage_info = NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800906}
907
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +0800908void kvm_free_physmem(struct kvm *kvm)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800909{
910 int i;
911
912 for (i = 0; i < kvm->nmemslots; ++i)
Al Viro8b6d44c2007-02-09 16:38:40 +0000913 kvm_free_physmem_slot(&kvm->memslots[i], NULL);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800914}
915
Avi Kivityf17abe92007-02-21 19:28:04 +0200916static void kvm_destroy_vm(struct kvm *kvm)
917{
Avi Kivity6d4e4c42007-11-21 16:41:05 +0200918 struct mm_struct *mm = kvm->mm;
919
Sheng Yangad8ba2c2009-01-06 10:03:02 +0800920 kvm_arch_sync_events(kvm);
Avi Kivity133de902007-02-12 00:54:44 -0800921 spin_lock(&kvm_lock);
922 list_del(&kvm->vm_list);
923 spin_unlock(&kvm_lock);
Avi Kivity399ec802008-11-19 13:58:46 +0200924 kvm_free_irq_routing(kvm);
Eddie Dong74906342007-06-19 18:05:03 +0300925 kvm_io_bus_destroy(&kvm->pio_bus);
Gregory Haskins2eeb2e92007-05-31 14:08:53 -0400926 kvm_io_bus_destroy(&kvm->mmio_bus);
Laurent Vivier5f94c172008-05-30 16:05:54 +0200927#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
928 if (kvm->coalesced_mmio_ring != NULL)
929 free_page((unsigned long)kvm->coalesced_mmio_ring);
930#endif
Andrea Arcangelie930bff2008-07-25 16:24:52 +0200931#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
932 mmu_notifier_unregister(&kvm->mmu_notifier, kvm->mm);
933#endif
Zhang Xiantaod19a9cd2007-11-18 18:43:45 +0800934 kvm_arch_destroy_vm(kvm);
Avi Kivity6d4e4c42007-11-21 16:41:05 +0200935 mmdrop(mm);
Avi Kivityf17abe92007-02-21 19:28:04 +0200936}
937
Izik Eidusd39f13b2008-03-30 16:01:25 +0300938void kvm_get_kvm(struct kvm *kvm)
939{
940 atomic_inc(&kvm->users_count);
941}
942EXPORT_SYMBOL_GPL(kvm_get_kvm);
943
944void kvm_put_kvm(struct kvm *kvm)
945{
946 if (atomic_dec_and_test(&kvm->users_count))
947 kvm_destroy_vm(kvm);
948}
949EXPORT_SYMBOL_GPL(kvm_put_kvm);
950
951
Avi Kivityf17abe92007-02-21 19:28:04 +0200952static int kvm_vm_release(struct inode *inode, struct file *filp)
953{
954 struct kvm *kvm = filp->private_data;
955
Izik Eidusd39f13b2008-03-30 16:01:25 +0300956 kvm_put_kvm(kvm);
Avi Kivity6aa8b732006-12-10 02:21:36 -0800957 return 0;
958}
959
Avi Kivity6aa8b732006-12-10 02:21:36 -0800960/*
Avi Kivity6aa8b732006-12-10 02:21:36 -0800961 * Allocate some memory and give it an address in the guest physical address
962 * space.
963 *
964 * Discontiguous memory is allowed, mostly for framebuffers.
Sheng Yangf78e0e22007-10-29 09:40:42 +0800965 *
Marcelo Tosatti10589a42007-12-20 19:18:22 -0500966 * Must be called holding mmap_sem for write.
Avi Kivity6aa8b732006-12-10 02:21:36 -0800967 */
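/*
 * Illustrative sketch (the size/hva variables are assumed): a caller
 * describes the slot with a struct kvm_userspace_memory_region and
 * registers it through kvm_set_memory_region(), the wrapper below that
 * takes slots_lock and calls __kvm_set_memory_region():
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.flags           = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = size,
 *		.userspace_addr  = (unsigned long)userspace_hva,
 *	};
 *	r = kvm_set_memory_region(kvm, &mem, 1);
 */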
Sheng Yangf78e0e22007-10-29 09:40:42 +0800968int __kvm_set_memory_region(struct kvm *kvm,
969 struct kvm_userspace_memory_region *mem,
970 int user_alloc)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800971{
972 int r;
973 gfn_t base_gfn;
974 unsigned long npages;
Avi Kivity99894a72009-03-29 16:31:25 +0300975 int largepages;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800976 unsigned long i;
977 struct kvm_memory_slot *memslot;
978 struct kvm_memory_slot old, new;
Avi Kivity6aa8b732006-12-10 02:21:36 -0800979
980 r = -EINVAL;
981 /* General sanity checks */
982 if (mem->memory_size & (PAGE_SIZE - 1))
983 goto out;
984 if (mem->guest_phys_addr & (PAGE_SIZE - 1))
985 goto out;
Sheng Yange7cacd42008-11-11 15:30:40 +0800986 if (user_alloc && (mem->userspace_addr & (PAGE_SIZE - 1)))
Hollis Blanchard78749802008-11-07 13:32:12 -0600987 goto out;
Izik Eiduse0d62c72007-10-24 23:57:46 +0200988 if (mem->slot >= KVM_MEMORY_SLOTS + KVM_PRIVATE_MEM_SLOTS)
Avi Kivity6aa8b732006-12-10 02:21:36 -0800989 goto out;
990 if (mem->guest_phys_addr + mem->memory_size < mem->guest_phys_addr)
991 goto out;
992
993 memslot = &kvm->memslots[mem->slot];
994 base_gfn = mem->guest_phys_addr >> PAGE_SHIFT;
995 npages = mem->memory_size >> PAGE_SHIFT;
996
997 if (!npages)
998 mem->flags &= ~KVM_MEM_LOG_DIRTY_PAGES;
999
Avi Kivity6aa8b732006-12-10 02:21:36 -08001000 new = old = *memslot;
1001
1002 new.base_gfn = base_gfn;
1003 new.npages = npages;
1004 new.flags = mem->flags;
1005
1006 /* Disallow changing a memory slot's size. */
1007 r = -EINVAL;
1008 if (npages && old.npages && npages != old.npages)
Sheng Yangf78e0e22007-10-29 09:40:42 +08001009 goto out_free;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001010
1011 /* Check for overlaps */
1012 r = -EEXIST;
1013 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1014 struct kvm_memory_slot *s = &kvm->memslots[i];
1015
Jan Kiszka4cd481f2009-04-13 11:59:32 +02001016 if (s == memslot || !s->npages)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001017 continue;
1018 if (!((base_gfn + npages <= s->base_gfn) ||
1019 (base_gfn >= s->base_gfn + s->npages)))
Sheng Yangf78e0e22007-10-29 09:40:42 +08001020 goto out_free;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001021 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001022
Avi Kivity6aa8b732006-12-10 02:21:36 -08001023 /* Free page dirty bitmap if unneeded */
1024 if (!(new.flags & KVM_MEM_LOG_DIRTY_PAGES))
Al Viro8b6d44c2007-02-09 16:38:40 +00001025 new.dirty_bitmap = NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001026
1027 r = -ENOMEM;
1028
1029 /* Allocate if a slot is being created */
Carsten Otteeff01142008-06-27 15:05:31 +02001030#ifndef CONFIG_S390
Anthony Liguori8d4e1282007-10-18 09:59:34 -05001031 if (npages && !new.rmap) {
Mike Dayd77c26f2007-10-08 09:02:08 -04001032 new.rmap = vmalloc(npages * sizeof(struct page *));
Izik Eidus290fc382007-09-27 14:11:22 +02001033
1034 if (!new.rmap)
Sheng Yangf78e0e22007-10-29 09:40:42 +08001035 goto out_free;
Izik Eidus290fc382007-09-27 14:11:22 +02001036
Izik Eidus290fc382007-09-27 14:11:22 +02001037 memset(new.rmap, 0, npages * sizeof(*new.rmap));
Anthony Liguori8d4e1282007-10-18 09:59:34 -05001038
Izik Eidus80b14b52007-10-25 11:54:04 +02001039 new.user_alloc = user_alloc;
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02001040 /*
1041 * hva_to_rmmap() serialzies with the mmu_lock and to be
1042 * safe it has to ignore memslots with !user_alloc &&
1043 * !userspace_addr.
1044 */
1045 if (user_alloc)
1046 new.userspace_addr = mem->userspace_addr;
1047 else
1048 new.userspace_addr = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001049 }
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001050 if (npages && !new.lpage_info) {
Avi Kivity99894a72009-03-29 16:31:25 +03001051 largepages = 1 + (base_gfn + npages - 1) / KVM_PAGES_PER_HPAGE;
1052 largepages -= base_gfn / KVM_PAGES_PER_HPAGE;
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001053
1054 new.lpage_info = vmalloc(largepages * sizeof(*new.lpage_info));
1055
1056 if (!new.lpage_info)
1057 goto out_free;
1058
1059 memset(new.lpage_info, 0, largepages * sizeof(*new.lpage_info));
1060
1061 if (base_gfn % KVM_PAGES_PER_HPAGE)
1062 new.lpage_info[0].write_count = 1;
1063 if ((base_gfn+npages) % KVM_PAGES_PER_HPAGE)
1064 new.lpage_info[largepages-1].write_count = 1;
1065 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001066
1067 /* Allocate page dirty bitmap if needed */
1068 if ((new.flags & KVM_MEM_LOG_DIRTY_PAGES) && !new.dirty_bitmap) {
1069 unsigned dirty_bytes = ALIGN(npages, BITS_PER_LONG) / 8;
1070
1071 new.dirty_bitmap = vmalloc(dirty_bytes);
1072 if (!new.dirty_bitmap)
Sheng Yangf78e0e22007-10-29 09:40:42 +08001073 goto out_free;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001074 memset(new.dirty_bitmap, 0, dirty_bytes);
1075 }
Carsten Otteeff01142008-06-27 15:05:31 +02001076#endif /* not defined CONFIG_S390 */
Avi Kivity6aa8b732006-12-10 02:21:36 -08001077
Marcelo Tosatti34d4cb82008-07-10 20:49:31 -03001078 if (!npages)
1079 kvm_arch_flush_shadow(kvm);
1080
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02001081 spin_lock(&kvm->mmu_lock);
1082 if (mem->slot >= kvm->nmemslots)
1083 kvm->nmemslots = mem->slot + 1;
1084
Avi Kivity6aa8b732006-12-10 02:21:36 -08001085 *memslot = new;
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02001086 spin_unlock(&kvm->mmu_lock);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001087
Zhang Xiantao0de10342007-11-20 16:25:04 +08001088 r = kvm_arch_set_memory_region(kvm, mem, old, user_alloc);
1089 if (r) {
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02001090 spin_lock(&kvm->mmu_lock);
Zhang Xiantao0de10342007-11-20 16:25:04 +08001091 *memslot = old;
Andrea Arcangeli604b38a2008-07-25 16:32:03 +02001092 spin_unlock(&kvm->mmu_lock);
Zhang Xiantao0de10342007-11-20 16:25:04 +08001093 goto out_free;
Zhang Xiantao3ad82a72007-11-20 13:11:38 +08001094 }
1095
Glauber Costa6f897242008-12-03 13:40:51 -02001096 kvm_free_physmem_slot(&old, npages ? &new : NULL);
1097 /* Slot deletion case: we have to update the current slot */
1098 if (!npages)
1099 *memslot = old;
Xiantao Zhang8a98f662008-10-06 13:47:38 +08001100#ifdef CONFIG_DMAR
Ben-Ami Yassour62c476c2008-09-14 03:48:28 +03001101 /* map the pages in iommu page table */
1102 r = kvm_iommu_map_pages(kvm, base_gfn, npages);
1103 if (r)
1104 goto out;
Xiantao Zhang8a98f662008-10-06 13:47:38 +08001105#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08001106 return 0;
1107
Sheng Yangf78e0e22007-10-29 09:40:42 +08001108out_free:
Avi Kivity6aa8b732006-12-10 02:21:36 -08001109 kvm_free_physmem_slot(&new, &old);
1110out:
1111 return r;
Izik Eidus210c7c42007-10-24 23:52:57 +02001112
1113}
Sheng Yangf78e0e22007-10-29 09:40:42 +08001114EXPORT_SYMBOL_GPL(__kvm_set_memory_region);
1115
1116int kvm_set_memory_region(struct kvm *kvm,
1117 struct kvm_userspace_memory_region *mem,
1118 int user_alloc)
1119{
1120 int r;
1121
Izik Eidus72dc67a2008-02-10 18:04:15 +02001122 down_write(&kvm->slots_lock);
Sheng Yangf78e0e22007-10-29 09:40:42 +08001123 r = __kvm_set_memory_region(kvm, mem, user_alloc);
Izik Eidus72dc67a2008-02-10 18:04:15 +02001124 up_write(&kvm->slots_lock);
Sheng Yangf78e0e22007-10-29 09:40:42 +08001125 return r;
1126}
Izik Eidus210c7c42007-10-24 23:52:57 +02001127EXPORT_SYMBOL_GPL(kvm_set_memory_region);
1128
Carsten Otte1fe779f2007-10-29 16:08:35 +01001129int kvm_vm_ioctl_set_memory_region(struct kvm *kvm,
1130 struct
1131 kvm_userspace_memory_region *mem,
1132 int user_alloc)
Izik Eidus210c7c42007-10-24 23:52:57 +02001133{
Izik Eiduse0d62c72007-10-24 23:57:46 +02001134 if (mem->slot >= KVM_MEMORY_SLOTS)
1135 return -EINVAL;
Izik Eidus210c7c42007-10-24 23:52:57 +02001136 return kvm_set_memory_region(kvm, mem, user_alloc);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001137}
1138
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08001139int kvm_get_dirty_log(struct kvm *kvm,
1140 struct kvm_dirty_log *log, int *is_dirty)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001141{
1142 struct kvm_memory_slot *memslot;
1143 int r, i;
1144 int n;
1145 unsigned long any = 0;
1146
Avi Kivity6aa8b732006-12-10 02:21:36 -08001147 r = -EINVAL;
1148 if (log->slot >= KVM_MEMORY_SLOTS)
1149 goto out;
1150
1151 memslot = &kvm->memslots[log->slot];
1152 r = -ENOENT;
1153 if (!memslot->dirty_bitmap)
1154 goto out;
1155
Uri Lublincd1a4a92007-02-22 16:43:09 +02001156 n = ALIGN(memslot->npages, BITS_PER_LONG) / 8;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001157
Uri Lublincd1a4a92007-02-22 16:43:09 +02001158 for (i = 0; !any && i < n/sizeof(long); ++i)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001159 any = memslot->dirty_bitmap[i];
1160
1161 r = -EFAULT;
1162 if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
1163 goto out;
1164
Zhang Xiantao5bb064d2007-11-18 20:29:43 +08001165 if (any)
1166 *is_dirty = 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001167
1168 r = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001169out:
Avi Kivity6aa8b732006-12-10 02:21:36 -08001170 return r;
1171}
1172
Izik Eiduscea7bb22007-10-17 19:17:48 +02001173int is_error_page(struct page *page)
1174{
1175 return page == bad_page;
1176}
1177EXPORT_SYMBOL_GPL(is_error_page);
1178
Anthony Liguori35149e22008-04-02 14:46:56 -05001179int is_error_pfn(pfn_t pfn)
1180{
1181 return pfn == bad_pfn;
1182}
1183EXPORT_SYMBOL_GPL(is_error_pfn);
1184
Izik Eidusf9d46eb2007-11-11 22:02:22 +02001185static inline unsigned long bad_hva(void)
1186{
1187 return PAGE_OFFSET;
1188}
1189
1190int kvm_is_error_hva(unsigned long addr)
1191{
1192 return addr == bad_hva();
1193}
1194EXPORT_SYMBOL_GPL(kvm_is_error_hva);
1195
Izik Eidus28430992008-10-03 17:40:32 +03001196struct kvm_memory_slot *gfn_to_memslot_unaliased(struct kvm *kvm, gfn_t gfn)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001197{
1198 int i;
1199
1200 for (i = 0; i < kvm->nmemslots; ++i) {
1201 struct kvm_memory_slot *memslot = &kvm->memslots[i];
1202
1203 if (gfn >= memslot->base_gfn
1204 && gfn < memslot->base_gfn + memslot->npages)
1205 return memslot;
1206 }
Al Viro8b6d44c2007-02-09 16:38:40 +00001207 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001208}
Izik Eidus28430992008-10-03 17:40:32 +03001209EXPORT_SYMBOL_GPL(gfn_to_memslot_unaliased);
Avi Kivitye8207542007-03-30 16:54:30 +03001210
1211struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn)
1212{
1213 gfn = unalias_gfn(kvm, gfn);
Izik Eidus28430992008-10-03 17:40:32 +03001214 return gfn_to_memslot_unaliased(kvm, gfn);
Avi Kivitye8207542007-03-30 16:54:30 +03001215}
Avi Kivity6aa8b732006-12-10 02:21:36 -08001216
Izik Eiduse0d62c72007-10-24 23:57:46 +02001217int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn)
1218{
1219 int i;
1220
1221 gfn = unalias_gfn(kvm, gfn);
1222 for (i = 0; i < KVM_MEMORY_SLOTS; ++i) {
1223 struct kvm_memory_slot *memslot = &kvm->memslots[i];
1224
1225 if (gfn >= memslot->base_gfn
1226 && gfn < memslot->base_gfn + memslot->npages)
1227 return 1;
1228 }
1229 return 0;
1230}
1231EXPORT_SYMBOL_GPL(kvm_is_visible_gfn);
1232
Marcelo Tosatti05da4552008-02-23 11:44:30 -03001233unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn)
Izik Eidus539cb662007-11-11 22:05:04 +02001234{
1235 struct kvm_memory_slot *slot;
1236
1237 gfn = unalias_gfn(kvm, gfn);
Izik Eidus28430992008-10-03 17:40:32 +03001238 slot = gfn_to_memslot_unaliased(kvm, gfn);
Izik Eidus539cb662007-11-11 22:05:04 +02001239 if (!slot)
1240 return bad_hva();
1241 return (slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE);
1242}
Sheng Yang0d150292008-04-25 21:44:50 +08001243EXPORT_SYMBOL_GPL(gfn_to_hva);
Izik Eidus539cb662007-11-11 22:05:04 +02001244
Anthony Liguori35149e22008-04-02 14:46:56 -05001245pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn)
Avi Kivity954bbbc22007-03-30 14:02:32 +03001246{
Anthony Liguori8d4e1282007-10-18 09:59:34 -05001247 struct page *page[1];
Izik Eidus539cb662007-11-11 22:05:04 +02001248 unsigned long addr;
Anthony Liguori8d4e1282007-10-18 09:59:34 -05001249 int npages;
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001250 pfn_t pfn;
Avi Kivity954bbbc22007-03-30 14:02:32 +03001251
Avi Kivity60395222007-10-21 11:03:36 +02001252 might_sleep();
1253
Izik Eidus539cb662007-11-11 22:05:04 +02001254 addr = gfn_to_hva(kvm, gfn);
1255 if (kvm_is_error_hva(addr)) {
Izik Eidus8a7ae052007-10-18 11:09:33 +02001256 get_page(bad_page);
Anthony Liguori35149e22008-04-02 14:46:56 -05001257 return page_to_pfn(bad_page);
Izik Eidus8a7ae052007-10-18 11:09:33 +02001258 }
Izik Eidus8a7ae052007-10-18 11:09:33 +02001259
Marcelo Tosatti4c2155c2008-09-16 20:54:47 -03001260 npages = get_user_pages_fast(addr, 1, 1, page);
Izik Eidus539cb662007-11-11 22:05:04 +02001261
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001262 if (unlikely(npages != 1)) {
1263 struct vm_area_struct *vma;
Anthony Liguori8d4e1282007-10-18 09:59:34 -05001264
Marcelo Tosatti4c2155c2008-09-16 20:54:47 -03001265 down_read(&current->mm->mmap_sem);
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001266 vma = find_vma(current->mm, addr);
Marcelo Tosatti4c2155c2008-09-16 20:54:47 -03001267
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001268 if (vma == NULL || addr < vma->vm_start ||
1269 !(vma->vm_flags & VM_PFNMAP)) {
Marcelo Tosatti4c2155c2008-09-16 20:54:47 -03001270 up_read(&current->mm->mmap_sem);
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001271 get_page(bad_page);
1272 return page_to_pfn(bad_page);
1273 }
1274
1275 pfn = ((addr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
Marcelo Tosatti4c2155c2008-09-16 20:54:47 -03001276 up_read(&current->mm->mmap_sem);
Xiantao Zhangc77fb9d2008-09-27 10:55:40 +08001277 BUG_ON(!kvm_is_mmio_pfn(pfn));
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001278 } else
1279 pfn = page_to_pfn(page[0]);
1280
1281 return pfn;
Anthony Liguori35149e22008-04-02 14:46:56 -05001282}
1283
1284EXPORT_SYMBOL_GPL(gfn_to_pfn);
1285
1286struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn)
1287{
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001288 pfn_t pfn;
1289
1290 pfn = gfn_to_pfn(kvm, gfn);
Xiantao Zhangc77fb9d2008-09-27 10:55:40 +08001291 if (!kvm_is_mmio_pfn(pfn))
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001292 return pfn_to_page(pfn);
1293
Xiantao Zhangc77fb9d2008-09-27 10:55:40 +08001294 WARN_ON(kvm_is_mmio_pfn(pfn));
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001295
1296 get_page(bad_page);
1297 return bad_page;
Avi Kivity954bbbc22007-03-30 14:02:32 +03001298}
Anthony Liguoriaab61cc2007-10-29 15:15:20 -05001299
Avi Kivity954bbbc22007-03-30 14:02:32 +03001300EXPORT_SYMBOL_GPL(gfn_to_page);
1301
Izik Eidusb4231d62007-11-20 11:49:33 +02001302void kvm_release_page_clean(struct page *page)
1303{
Anthony Liguori35149e22008-04-02 14:46:56 -05001304 kvm_release_pfn_clean(page_to_pfn(page));
Izik Eidusb4231d62007-11-20 11:49:33 +02001305}
1306EXPORT_SYMBOL_GPL(kvm_release_page_clean);
1307
Anthony Liguori35149e22008-04-02 14:46:56 -05001308void kvm_release_pfn_clean(pfn_t pfn)
1309{
Xiantao Zhangc77fb9d2008-09-27 10:55:40 +08001310 if (!kvm_is_mmio_pfn(pfn))
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001311 put_page(pfn_to_page(pfn));
Anthony Liguori35149e22008-04-02 14:46:56 -05001312}
1313EXPORT_SYMBOL_GPL(kvm_release_pfn_clean);
1314
Izik Eidusb4231d62007-11-20 11:49:33 +02001315void kvm_release_page_dirty(struct page *page)
Izik Eidus8a7ae052007-10-18 11:09:33 +02001316{
Anthony Liguori35149e22008-04-02 14:46:56 -05001317 kvm_release_pfn_dirty(page_to_pfn(page));
Izik Eidus8a7ae052007-10-18 11:09:33 +02001318}
Izik Eidusb4231d62007-11-20 11:49:33 +02001319EXPORT_SYMBOL_GPL(kvm_release_page_dirty);
Izik Eidus8a7ae052007-10-18 11:09:33 +02001320
Anthony Liguori35149e22008-04-02 14:46:56 -05001321void kvm_release_pfn_dirty(pfn_t pfn)
1322{
1323 kvm_set_pfn_dirty(pfn);
1324 kvm_release_pfn_clean(pfn);
1325}
1326EXPORT_SYMBOL_GPL(kvm_release_pfn_dirty);
1327
1328void kvm_set_page_dirty(struct page *page)
1329{
1330 kvm_set_pfn_dirty(page_to_pfn(page));
1331}
1332EXPORT_SYMBOL_GPL(kvm_set_page_dirty);
1333
1334void kvm_set_pfn_dirty(pfn_t pfn)
1335{
Xiantao Zhangc77fb9d2008-09-27 10:55:40 +08001336 if (!kvm_is_mmio_pfn(pfn)) {
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001337 struct page *page = pfn_to_page(pfn);
1338 if (!PageReserved(page))
1339 SetPageDirty(page);
1340 }
Anthony Liguori35149e22008-04-02 14:46:56 -05001341}
1342EXPORT_SYMBOL_GPL(kvm_set_pfn_dirty);
1343
1344void kvm_set_pfn_accessed(pfn_t pfn)
1345{
Xiantao Zhangc77fb9d2008-09-27 10:55:40 +08001346 if (!kvm_is_mmio_pfn(pfn))
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001347 mark_page_accessed(pfn_to_page(pfn));
Anthony Liguori35149e22008-04-02 14:46:56 -05001348}
1349EXPORT_SYMBOL_GPL(kvm_set_pfn_accessed);
1350
1351void kvm_get_pfn(pfn_t pfn)
1352{
Xiantao Zhangc77fb9d2008-09-27 10:55:40 +08001353 if (!kvm_is_mmio_pfn(pfn))
Anthony Liguori2e2e3732008-04-30 15:37:07 -05001354 get_page(pfn_to_page(pfn));
Anthony Liguori35149e22008-04-02 14:46:56 -05001355}
1356EXPORT_SYMBOL_GPL(kvm_get_pfn);
1357
Izik Eidus195aefd2007-10-01 22:14:18 +02001358static int next_segment(unsigned long len, int offset)
1359{
1360 if (len > PAGE_SIZE - offset)
1361 return PAGE_SIZE - offset;
1362 else
1363 return len;
1364}
1365
1366int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
1367 int len)
1368{
Izik Eiduse0506bc2007-11-11 22:10:22 +02001369 int r;
1370 unsigned long addr;
Izik Eidus195aefd2007-10-01 22:14:18 +02001371
Izik Eiduse0506bc2007-11-11 22:10:22 +02001372 addr = gfn_to_hva(kvm, gfn);
1373 if (kvm_is_error_hva(addr))
Izik Eidus195aefd2007-10-01 22:14:18 +02001374 return -EFAULT;
Izik Eiduse0506bc2007-11-11 22:10:22 +02001375 r = copy_from_user(data, (void __user *)addr + offset, len);
1376 if (r)
1377 return -EFAULT;
Izik Eidus195aefd2007-10-01 22:14:18 +02001378 return 0;
1379}
1380EXPORT_SYMBOL_GPL(kvm_read_guest_page);
1381
1382int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len)
1383{
1384 gfn_t gfn = gpa >> PAGE_SHIFT;
1385 int seg;
1386 int offset = offset_in_page(gpa);
1387 int ret;
1388
1389 while ((seg = next_segment(len, offset)) != 0) {
1390 ret = kvm_read_guest_page(kvm, gfn, data, offset, seg);
1391 if (ret < 0)
1392 return ret;
1393 offset = 0;
1394 len -= seg;
1395 data += seg;
1396 ++gfn;
1397 }
1398 return 0;
1399}
1400EXPORT_SYMBOL_GPL(kvm_read_guest);
1401
Marcelo Tosatti7ec54582007-12-20 19:18:23 -05001402int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
1403 unsigned long len)
1404{
1405 int r;
1406 unsigned long addr;
1407 gfn_t gfn = gpa >> PAGE_SHIFT;
1408 int offset = offset_in_page(gpa);
1409
1410 addr = gfn_to_hva(kvm, gfn);
1411 if (kvm_is_error_hva(addr))
1412 return -EFAULT;
Andrea Arcangeli0aac03f2008-01-30 19:57:35 +01001413 pagefault_disable();
Marcelo Tosatti7ec54582007-12-20 19:18:23 -05001414 r = __copy_from_user_inatomic(data, (void __user *)addr + offset, len);
Andrea Arcangeli0aac03f2008-01-30 19:57:35 +01001415 pagefault_enable();
Marcelo Tosatti7ec54582007-12-20 19:18:23 -05001416 if (r)
1417 return -EFAULT;
1418 return 0;
1419}
1420EXPORT_SYMBOL(kvm_read_guest_atomic);
1421
Izik Eidus195aefd2007-10-01 22:14:18 +02001422int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
1423 int offset, int len)
1424{
Izik Eiduse0506bc2007-11-11 22:10:22 +02001425 int r;
1426 unsigned long addr;
Izik Eidus195aefd2007-10-01 22:14:18 +02001427
Izik Eiduse0506bc2007-11-11 22:10:22 +02001428 addr = gfn_to_hva(kvm, gfn);
1429 if (kvm_is_error_hva(addr))
Izik Eidus195aefd2007-10-01 22:14:18 +02001430 return -EFAULT;
Izik Eiduse0506bc2007-11-11 22:10:22 +02001431 r = copy_to_user((void __user *)addr + offset, data, len);
1432 if (r)
1433 return -EFAULT;
Izik Eidus195aefd2007-10-01 22:14:18 +02001434 mark_page_dirty(kvm, gfn);
1435 return 0;
1436}
1437EXPORT_SYMBOL_GPL(kvm_write_guest_page);
1438
1439int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
1440 unsigned long len)
1441{
1442 gfn_t gfn = gpa >> PAGE_SHIFT;
1443 int seg;
1444 int offset = offset_in_page(gpa);
1445 int ret;
1446
1447 while ((seg = next_segment(len, offset)) != 0) {
1448 ret = kvm_write_guest_page(kvm, gfn, data, offset, seg);
1449 if (ret < 0)
1450 return ret;
1451 offset = 0;
1452 len -= seg;
1453 data += seg;
1454 ++gfn;
1455 }
1456 return 0;
1457}
1458
1459int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len)
1460{
Izik Eidus3e021bf2007-11-19 11:16:57 +02001461 return kvm_write_guest_page(kvm, gfn, empty_zero_page, offset, len);
Izik Eidus195aefd2007-10-01 22:14:18 +02001462}
1463EXPORT_SYMBOL_GPL(kvm_clear_guest_page);
1464
1465int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len)
1466{
1467 gfn_t gfn = gpa >> PAGE_SHIFT;
1468 int seg;
1469 int offset = offset_in_page(gpa);
1470 int ret;
1471
1472 while ((seg = next_segment(len, offset)) != 0) {
1473 ret = kvm_clear_guest_page(kvm, gfn, offset, seg);
1474 if (ret < 0)
1475 return ret;
1476 offset = 0;
1477 len -= seg;
1478 ++gfn;
1479 }
1480 return 0;
1481}
1482EXPORT_SYMBOL_GPL(kvm_clear_guest);
1483
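/*
 * Record that a guest frame was written, so that a subsequent
 * KVM_GET_DIRTY_LOG ioctl reports it.  The slot's dirty_bitmap is indexed
 * by the gfn's offset from the slot's base_gfn.
 */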
Avi Kivity6aa8b732006-12-10 02:21:36 -08001484void mark_page_dirty(struct kvm *kvm, gfn_t gfn)
1485{
Nguyen Anh Quynh31389942007-06-05 10:35:19 +03001486 struct kvm_memory_slot *memslot;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001487
Uri Lublin3b6fff12007-10-30 10:42:09 +02001488 gfn = unalias_gfn(kvm, gfn);
Izik Eidus28430992008-10-03 17:40:32 +03001489 memslot = gfn_to_memslot_unaliased(kvm, gfn);
Rusty Russell7e9d6192007-07-31 20:41:14 +10001490 if (memslot && memslot->dirty_bitmap) {
1491 unsigned long rel_gfn = gfn - memslot->base_gfn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001492
Rusty Russell7e9d6192007-07-31 20:41:14 +10001493 /* avoid a read-modify-write if the bit is already set */
1494 if (!test_bit(rel_gfn, memslot->dirty_bitmap))
1495 set_bit(rel_gfn, memslot->dirty_bitmap);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001496 }
1497}
1498
Eddie Dongb6958ce2007-07-18 12:15:21 +03001499/*
1500 * The vCPU has executed a HLT instruction with in-kernel mode enabled.
1501 */
Hollis Blanchard8776e512007-10-31 17:24:24 -05001502void kvm_vcpu_block(struct kvm_vcpu *vcpu)
Eddie Dongb6958ce2007-07-18 12:15:21 +03001503{
Marcelo Tosattie5c239c2008-05-08 19:47:01 -03001504 DEFINE_WAIT(wait);
Eddie Dongb6958ce2007-07-18 12:15:21 +03001505
Marcelo Tosattie5c239c2008-05-08 19:47:01 -03001506 for (;;) {
1507 prepare_to_wait(&vcpu->wq, &wait, TASK_INTERRUPTIBLE);
Eddie Dongb6958ce2007-07-18 12:15:21 +03001508
Marcelo Tosattid7690172008-09-08 15:23:48 -03001509 if (kvm_cpu_has_interrupt(vcpu) ||
1510 kvm_cpu_has_pending_timer(vcpu) ||
1511 kvm_arch_vcpu_runnable(vcpu)) {
1512 set_bit(KVM_REQ_UNHALT, &vcpu->requests);
Marcelo Tosattie5c239c2008-05-08 19:47:01 -03001513 break;
Marcelo Tosattid7690172008-09-08 15:23:48 -03001514 }
Marcelo Tosattie5c239c2008-05-08 19:47:01 -03001515 if (signal_pending(current))
1516 break;
1517
Eddie Dongb6958ce2007-07-18 12:15:21 +03001518 vcpu_put(vcpu);
1519 schedule();
1520 vcpu_load(vcpu);
1521 }
1522
Marcelo Tosattie5c239c2008-05-08 19:47:01 -03001523 finish_wait(&vcpu->wq, &wait);
Eddie Dongb6958ce2007-07-18 12:15:21 +03001524}
1525
Avi Kivity6aa8b732006-12-10 02:21:36 -08001526void kvm_resched(struct kvm_vcpu *vcpu)
1527{
Yaozu Dong3fca0362007-04-25 16:49:19 +03001528 if (!need_resched())
1529 return;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001530 cond_resched();
Avi Kivity6aa8b732006-12-10 02:21:36 -08001531}
1532EXPORT_SYMBOL_GPL(kvm_resched);
1533
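/*
 * Back mmap() of a vcpu fd: page 0 is the shared kvm_run structure,
 * followed on x86 by the PIO data page and, when configured, the coalesced
 * MMIO ring.  This is the layout userspace sees after mmap()ing the vcpu.
 */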
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11001534static int kvm_vcpu_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02001535{
1536 struct kvm_vcpu *vcpu = vma->vm_file->private_data;
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02001537 struct page *page;
1538
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11001539 if (vmf->pgoff == 0)
Avi Kivity039576c2007-03-20 12:46:50 +02001540 page = virt_to_page(vcpu->run);
Avi Kivity09566762008-01-23 18:14:23 +02001541#ifdef CONFIG_X86
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11001542 else if (vmf->pgoff == KVM_PIO_PAGE_OFFSET)
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001543 page = virt_to_page(vcpu->arch.pio_data);
Avi Kivity09566762008-01-23 18:14:23 +02001544#endif
Laurent Vivier5f94c172008-05-30 16:05:54 +02001545#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1546 else if (vmf->pgoff == KVM_COALESCED_MMIO_PAGE_OFFSET)
1547 page = virt_to_page(vcpu->kvm->coalesced_mmio_ring);
1548#endif
Avi Kivity039576c2007-03-20 12:46:50 +02001549 else
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11001550 return VM_FAULT_SIGBUS;
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02001551 get_page(page);
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11001552 vmf->page = page;
1553 return 0;
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02001554}
1555
1556static struct vm_operations_struct kvm_vcpu_vm_ops = {
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11001557 .fault = kvm_vcpu_fault,
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02001558};
1559
1560static int kvm_vcpu_mmap(struct file *file, struct vm_area_struct *vma)
1561{
1562 vma->vm_ops = &kvm_vcpu_vm_ops;
1563 return 0;
1564}
1565
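/*
 * Illustrative only (not part of this file): a minimal sketch of how a
 * userspace VMM consumes the vcpu file operations above, using the
 * standard KVM ioctls from <linux/kvm.h>.  Error handling is omitted.
 *
 *	int kvm  = open("/dev/kvm", O_RDWR);
 *	int vm   = ioctl(kvm, KVM_CREATE_VM, 0);
 *	int vcpu = ioctl(vm, KVM_CREATE_VCPU, 0);
 *	long sz  = ioctl(kvm, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu, 0);
 *	ioctl(vcpu, KVM_RUN, 0);
 *	... then inspect run->exit_reason (KVM_EXIT_IO, KVM_EXIT_MMIO, ...).
 */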
Avi Kivitybccf2152007-02-21 18:04:26 +02001566static int kvm_vcpu_release(struct inode *inode, struct file *filp)
1567{
1568 struct kvm_vcpu *vcpu = filp->private_data;
1569
Al Viro66c0b392008-04-19 20:33:56 +01001570 kvm_put_kvm(vcpu->kvm);
Avi Kivitybccf2152007-02-21 18:04:26 +02001571 return 0;
1572}
1573
Christian Borntraeger3d3aab12008-12-02 11:17:32 +01001574static struct file_operations kvm_vcpu_fops = {
Avi Kivitybccf2152007-02-21 18:04:26 +02001575 .release = kvm_vcpu_release,
1576 .unlocked_ioctl = kvm_vcpu_ioctl,
1577 .compat_ioctl = kvm_vcpu_ioctl,
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02001578 .mmap = kvm_vcpu_mmap,
Avi Kivitybccf2152007-02-21 18:04:26 +02001579};
1580
1581/*
1582 * Allocates an inode for the vcpu.
1583 */
1584static int create_vcpu_fd(struct kvm_vcpu *vcpu)
1585{
Ulrich Drepper7d9dbca2008-07-23 21:29:22 -07001586 int fd = anon_inode_getfd("kvm-vcpu", &kvm_vcpu_fops, vcpu, 0);
Al Viro2030a422008-02-23 06:46:49 -05001587 if (fd < 0)
Al Viro66c0b392008-04-19 20:33:56 +01001588 kvm_put_kvm(vcpu->kvm);
Avi Kivitybccf2152007-02-21 18:04:26 +02001589 return fd;
Avi Kivitybccf2152007-02-21 18:04:26 +02001590}
1591
Avi Kivityc5ea7662007-02-20 18:41:05 +02001592/*
1593 * Creates some virtual cpus. Good luck creating more than one.
1594 */
1595static int kvm_vm_ioctl_create_vcpu(struct kvm *kvm, int n)
1596{
1597 int r;
1598 struct kvm_vcpu *vcpu;
1599
Avi Kivityc5ea7662007-02-20 18:41:05 +02001600 if (!valid_vcpu(n))
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001601 return -EINVAL;
Avi Kivityc5ea7662007-02-20 18:41:05 +02001602
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08001603 vcpu = kvm_arch_vcpu_create(kvm, n);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001604 if (IS_ERR(vcpu))
1605 return PTR_ERR(vcpu);
Avi Kivityc5ea7662007-02-20 18:41:05 +02001606
Avi Kivity15ad7142007-07-11 18:17:21 +03001607 preempt_notifier_init(&vcpu->preempt_notifier, &kvm_preempt_ops);
1608
Avi Kivity26e52152007-11-20 15:30:24 +02001609 r = kvm_arch_vcpu_setup(vcpu);
1610 if (r)
Glauber Costa7d8fece2008-09-17 23:16:59 -03001611 return r;
Avi Kivity26e52152007-11-20 15:30:24 +02001612
Shaohua Li11ec2802007-07-23 14:51:37 +08001613 mutex_lock(&kvm->lock);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001614 if (kvm->vcpus[n]) {
1615 r = -EEXIST;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08001616 goto vcpu_destroy;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001617 }
1618 kvm->vcpus[n] = vcpu;
Shaohua Li11ec2802007-07-23 14:51:37 +08001619 mutex_unlock(&kvm->lock);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001620
1621 /* Now it's all set up, let userspace reach it */
Al Viro66c0b392008-04-19 20:33:56 +01001622 kvm_get_kvm(kvm);
Avi Kivitybccf2152007-02-21 18:04:26 +02001623 r = create_vcpu_fd(vcpu);
1624 if (r < 0)
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001625 goto unlink;
Avi Kivitybccf2152007-02-21 18:04:26 +02001626 return r;
Avi Kivityc5ea7662007-02-20 18:41:05 +02001627
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001628unlink:
Shaohua Li11ec2802007-07-23 14:51:37 +08001629 mutex_lock(&kvm->lock);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001630 kvm->vcpus[n] = NULL;
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08001631vcpu_destroy:
Glauber Costa7d8fece2008-09-17 23:16:59 -03001632 mutex_unlock(&kvm->lock);
Hollis Blanchardd40ccc62007-11-19 14:04:43 -06001633 kvm_arch_vcpu_destroy(vcpu);
Avi Kivityc5ea7662007-02-20 18:41:05 +02001634 return r;
1635}
1636
Avi Kivity1961d272007-03-05 19:46:05 +02001637static int kvm_vcpu_ioctl_set_sigmask(struct kvm_vcpu *vcpu, sigset_t *sigset)
1638{
1639 if (sigset) {
1640 sigdelsetmask(sigset, sigmask(SIGKILL)|sigmask(SIGSTOP));
1641 vcpu->sigset_active = 1;
1642 vcpu->sigset = *sigset;
1643 } else
1644 vcpu->sigset_active = 0;
1645 return 0;
1646}
1647
Sheng Yangc1e01512009-02-25 17:22:26 +08001648#ifdef __KVM_HAVE_MSIX
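/*
 * Reserve the host and guest MSI-X entry arrays for an assigned device.
 * The entry count can be set only once, and must be set before
 * KVM_ASSIGN_SET_MSIX_ENTRY fills in the individual vectors.
 */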
1649static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
1650 struct kvm_assigned_msix_nr *entry_nr)
1651{
1652 int r = 0;
1653 struct kvm_assigned_dev_kernel *adev;
1654
1655 mutex_lock(&kvm->lock);
1656
1657 adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
1658 entry_nr->assigned_dev_id);
1659 if (!adev) {
1660 r = -EINVAL;
1661 goto msix_nr_out;
1662 }
1663
1664 if (adev->entries_nr == 0) {
1665 adev->entries_nr = entry_nr->entry_nr;
1666 if (adev->entries_nr == 0 ||
1667 adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
1668 r = -EINVAL;
1669 goto msix_nr_out;
1670 }
1671
1672 adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
1673 entry_nr->entry_nr,
1674 GFP_KERNEL);
1675 if (!adev->host_msix_entries) {
1676 r = -ENOMEM;
1677 goto msix_nr_out;
1678 }
1679 adev->guest_msix_entries = kzalloc(
1680 sizeof(struct kvm_guest_msix_entry) *
1681 entry_nr->entry_nr, GFP_KERNEL);
1682 if (!adev->guest_msix_entries) {
1683 kfree(adev->host_msix_entries);
1684 r = -ENOMEM;
1685 goto msix_nr_out;
1686 }
 1687 } else /* setting the MSI-X entry count twice is not allowed */
1688 r = -EINVAL;
1689msix_nr_out:
1690 mutex_unlock(&kvm->lock);
1691 return r;
1692}
1693
1694static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
1695 struct kvm_assigned_msix_entry *entry)
1696{
1697 int r = 0, i;
1698 struct kvm_assigned_dev_kernel *adev;
1699
1700 mutex_lock(&kvm->lock);
1701
1702 adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
1703 entry->assigned_dev_id);
1704
1705 if (!adev) {
1706 r = -EINVAL;
1707 goto msix_entry_out;
1708 }
1709
1710 for (i = 0; i < adev->entries_nr; i++)
1711 if (adev->guest_msix_entries[i].vector == 0 ||
1712 adev->guest_msix_entries[i].entry == entry->entry) {
1713 adev->guest_msix_entries[i].entry = entry->entry;
1714 adev->guest_msix_entries[i].vector = entry->gsi;
1715 adev->host_msix_entries[i].entry = entry->entry;
1716 break;
1717 }
1718 if (i == adev->entries_nr) {
1719 r = -ENOSPC;
1720 goto msix_entry_out;
1721 }
1722
1723msix_entry_out:
1724 mutex_unlock(&kvm->lock);
1725
1726 return r;
1727}
1728#endif
1729
Avi Kivitybccf2152007-02-21 18:04:26 +02001730static long kvm_vcpu_ioctl(struct file *filp,
1731 unsigned int ioctl, unsigned long arg)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001732{
Avi Kivitybccf2152007-02-21 18:04:26 +02001733 struct kvm_vcpu *vcpu = filp->private_data;
Al Viro2f366982007-02-09 16:38:35 +00001734 void __user *argp = (void __user *)arg;
Carsten Otte313a3dc2007-10-11 19:16:52 +02001735 int r;
Dave Hansenfa3795a2008-08-11 10:01:46 -07001736 struct kvm_fpu *fpu = NULL;
1737 struct kvm_sregs *kvm_sregs = NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001738
Avi Kivity6d4e4c42007-11-21 16:41:05 +02001739 if (vcpu->kvm->mm != current->mm)
1740 return -EIO;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001741 switch (ioctl) {
Avi Kivity9a2bb7f2007-02-22 12:58:31 +02001742 case KVM_RUN:
Avi Kivityf0fe5102007-03-07 13:11:17 +02001743 r = -EINVAL;
1744 if (arg)
1745 goto out;
Hollis Blanchardb6c7a5d2007-11-01 14:16:10 -05001746 r = kvm_arch_vcpu_ioctl_run(vcpu, vcpu->run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001747 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001748 case KVM_GET_REGS: {
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001749 struct kvm_regs *kvm_regs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001750
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001751 r = -ENOMEM;
1752 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1753 if (!kvm_regs)
1754 goto out;
1755 r = kvm_arch_vcpu_ioctl_get_regs(vcpu, kvm_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001756 if (r)
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001757 goto out_free1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001758 r = -EFAULT;
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001759 if (copy_to_user(argp, kvm_regs, sizeof(struct kvm_regs)))
1760 goto out_free1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001761 r = 0;
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001762out_free1:
1763 kfree(kvm_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001764 break;
1765 }
1766 case KVM_SET_REGS: {
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001767 struct kvm_regs *kvm_regs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001768
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001769 r = -ENOMEM;
1770 kvm_regs = kzalloc(sizeof(struct kvm_regs), GFP_KERNEL);
1771 if (!kvm_regs)
1772 goto out;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001773 r = -EFAULT;
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001774 if (copy_from_user(kvm_regs, argp, sizeof(struct kvm_regs)))
1775 goto out_free2;
1776 r = kvm_arch_vcpu_ioctl_set_regs(vcpu, kvm_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001777 if (r)
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001778 goto out_free2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001779 r = 0;
Xiantao Zhang3e4bb3a2008-02-25 18:52:20 +08001780out_free2:
1781 kfree(kvm_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001782 break;
1783 }
1784 case KVM_GET_SREGS: {
Dave Hansenfa3795a2008-08-11 10:01:46 -07001785 kvm_sregs = kzalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1786 r = -ENOMEM;
1787 if (!kvm_sregs)
1788 goto out;
1789 r = kvm_arch_vcpu_ioctl_get_sregs(vcpu, kvm_sregs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001790 if (r)
1791 goto out;
1792 r = -EFAULT;
Dave Hansenfa3795a2008-08-11 10:01:46 -07001793 if (copy_to_user(argp, kvm_sregs, sizeof(struct kvm_sregs)))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001794 goto out;
1795 r = 0;
1796 break;
1797 }
1798 case KVM_SET_SREGS: {
Dave Hansenfa3795a2008-08-11 10:01:46 -07001799 kvm_sregs = kmalloc(sizeof(struct kvm_sregs), GFP_KERNEL);
1800 r = -ENOMEM;
1801 if (!kvm_sregs)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001802 goto out;
Dave Hansenfa3795a2008-08-11 10:01:46 -07001803 r = -EFAULT;
1804 if (copy_from_user(kvm_sregs, argp, sizeof(struct kvm_sregs)))
1805 goto out;
1806 r = kvm_arch_vcpu_ioctl_set_sregs(vcpu, kvm_sregs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001807 if (r)
1808 goto out;
1809 r = 0;
1810 break;
1811 }
Marcelo Tosatti62d9f0d2008-04-11 13:24:45 -03001812 case KVM_GET_MP_STATE: {
1813 struct kvm_mp_state mp_state;
1814
1815 r = kvm_arch_vcpu_ioctl_get_mpstate(vcpu, &mp_state);
1816 if (r)
1817 goto out;
1818 r = -EFAULT;
1819 if (copy_to_user(argp, &mp_state, sizeof mp_state))
1820 goto out;
1821 r = 0;
1822 break;
1823 }
1824 case KVM_SET_MP_STATE: {
1825 struct kvm_mp_state mp_state;
1826
1827 r = -EFAULT;
1828 if (copy_from_user(&mp_state, argp, sizeof mp_state))
1829 goto out;
1830 r = kvm_arch_vcpu_ioctl_set_mpstate(vcpu, &mp_state);
1831 if (r)
1832 goto out;
1833 r = 0;
1834 break;
1835 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001836 case KVM_TRANSLATE: {
1837 struct kvm_translation tr;
1838
1839 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00001840 if (copy_from_user(&tr, argp, sizeof tr))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001841 goto out;
Zhang Xiantao8b006792007-11-16 13:05:55 +08001842 r = kvm_arch_vcpu_ioctl_translate(vcpu, &tr);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001843 if (r)
1844 goto out;
1845 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00001846 if (copy_to_user(argp, &tr, sizeof tr))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001847 goto out;
1848 r = 0;
1849 break;
1850 }
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001851 case KVM_SET_GUEST_DEBUG: {
1852 struct kvm_guest_debug dbg;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001853
1854 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00001855 if (copy_from_user(&dbg, argp, sizeof dbg))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001856 goto out;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01001857 r = kvm_arch_vcpu_ioctl_set_guest_debug(vcpu, &dbg);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001858 if (r)
1859 goto out;
1860 r = 0;
1861 break;
1862 }
Avi Kivity1961d272007-03-05 19:46:05 +02001863 case KVM_SET_SIGNAL_MASK: {
1864 struct kvm_signal_mask __user *sigmask_arg = argp;
1865 struct kvm_signal_mask kvm_sigmask;
1866 sigset_t sigset, *p;
1867
1868 p = NULL;
1869 if (argp) {
1870 r = -EFAULT;
1871 if (copy_from_user(&kvm_sigmask, argp,
1872 sizeof kvm_sigmask))
1873 goto out;
1874 r = -EINVAL;
1875 if (kvm_sigmask.len != sizeof sigset)
1876 goto out;
1877 r = -EFAULT;
1878 if (copy_from_user(&sigset, sigmask_arg->sigset,
1879 sizeof sigset))
1880 goto out;
1881 p = &sigset;
1882 }
 1883 r = kvm_vcpu_ioctl_set_sigmask(vcpu, p);
1884 break;
1885 }
Avi Kivityb8836732007-04-01 16:34:31 +03001886 case KVM_GET_FPU: {
Dave Hansenfa3795a2008-08-11 10:01:46 -07001887 fpu = kzalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1888 r = -ENOMEM;
1889 if (!fpu)
1890 goto out;
1891 r = kvm_arch_vcpu_ioctl_get_fpu(vcpu, fpu);
Avi Kivityb8836732007-04-01 16:34:31 +03001892 if (r)
1893 goto out;
1894 r = -EFAULT;
Dave Hansenfa3795a2008-08-11 10:01:46 -07001895 if (copy_to_user(argp, fpu, sizeof(struct kvm_fpu)))
Avi Kivityb8836732007-04-01 16:34:31 +03001896 goto out;
1897 r = 0;
1898 break;
1899 }
1900 case KVM_SET_FPU: {
Dave Hansenfa3795a2008-08-11 10:01:46 -07001901 fpu = kmalloc(sizeof(struct kvm_fpu), GFP_KERNEL);
1902 r = -ENOMEM;
1903 if (!fpu)
Avi Kivityb8836732007-04-01 16:34:31 +03001904 goto out;
Dave Hansenfa3795a2008-08-11 10:01:46 -07001905 r = -EFAULT;
1906 if (copy_from_user(fpu, argp, sizeof(struct kvm_fpu)))
1907 goto out;
1908 r = kvm_arch_vcpu_ioctl_set_fpu(vcpu, fpu);
Avi Kivityb8836732007-04-01 16:34:31 +03001909 if (r)
1910 goto out;
1911 r = 0;
1912 break;
1913 }
Avi Kivitybccf2152007-02-21 18:04:26 +02001914 default:
Carsten Otte313a3dc2007-10-11 19:16:52 +02001915 r = kvm_arch_vcpu_ioctl(filp, ioctl, arg);
Avi Kivitybccf2152007-02-21 18:04:26 +02001916 }
1917out:
Dave Hansenfa3795a2008-08-11 10:01:46 -07001918 kfree(fpu);
1919 kfree(kvm_sregs);
Avi Kivitybccf2152007-02-21 18:04:26 +02001920 return r;
1921}
1922
1923static long kvm_vm_ioctl(struct file *filp,
1924 unsigned int ioctl, unsigned long arg)
1925{
1926 struct kvm *kvm = filp->private_data;
1927 void __user *argp = (void __user *)arg;
Carsten Otte1fe779f2007-10-29 16:08:35 +01001928 int r;
Avi Kivitybccf2152007-02-21 18:04:26 +02001929
Avi Kivity6d4e4c42007-11-21 16:41:05 +02001930 if (kvm->mm != current->mm)
1931 return -EIO;
Avi Kivitybccf2152007-02-21 18:04:26 +02001932 switch (ioctl) {
1933 case KVM_CREATE_VCPU:
1934 r = kvm_vm_ioctl_create_vcpu(kvm, arg);
1935 if (r < 0)
1936 goto out;
1937 break;
Izik Eidus6fc138d2007-10-09 19:20:39 +02001938 case KVM_SET_USER_MEMORY_REGION: {
1939 struct kvm_userspace_memory_region kvm_userspace_mem;
1940
1941 r = -EFAULT;
1942 if (copy_from_user(&kvm_userspace_mem, argp,
1943 sizeof kvm_userspace_mem))
1944 goto out;
1945
1946 r = kvm_vm_ioctl_set_memory_region(kvm, &kvm_userspace_mem, 1);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001947 if (r)
1948 goto out;
1949 break;
1950 }
1951 case KVM_GET_DIRTY_LOG: {
1952 struct kvm_dirty_log log;
1953
1954 r = -EFAULT;
Al Viro2f366982007-02-09 16:38:35 +00001955 if (copy_from_user(&log, argp, sizeof log))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001956 goto out;
Avi Kivity2c6f5df2007-02-20 18:27:58 +02001957 r = kvm_vm_ioctl_get_dirty_log(kvm, &log);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001958 if (r)
1959 goto out;
1960 break;
1961 }
Laurent Vivier5f94c172008-05-30 16:05:54 +02001962#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
1963 case KVM_REGISTER_COALESCED_MMIO: {
1964 struct kvm_coalesced_mmio_zone zone;
1965 r = -EFAULT;
1966 if (copy_from_user(&zone, argp, sizeof zone))
1967 goto out;
1968 r = -ENXIO;
1969 r = kvm_vm_ioctl_register_coalesced_mmio(kvm, &zone);
1970 if (r)
1971 goto out;
1972 r = 0;
1973 break;
1974 }
1975 case KVM_UNREGISTER_COALESCED_MMIO: {
1976 struct kvm_coalesced_mmio_zone zone;
1977 r = -EFAULT;
1978 if (copy_from_user(&zone, argp, sizeof zone))
1979 goto out;
1980 r = -ENXIO;
1981 r = kvm_vm_ioctl_unregister_coalesced_mmio(kvm, &zone);
1982 if (r)
1983 goto out;
1984 r = 0;
1985 break;
1986 }
1987#endif
Xiantao Zhang8a98f662008-10-06 13:47:38 +08001988#ifdef KVM_CAP_DEVICE_ASSIGNMENT
1989 case KVM_ASSIGN_PCI_DEVICE: {
1990 struct kvm_assigned_pci_dev assigned_dev;
1991
1992 r = -EFAULT;
1993 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
1994 goto out;
1995 r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
1996 if (r)
1997 goto out;
1998 break;
1999 }
2000 case KVM_ASSIGN_IRQ: {
2001 struct kvm_assigned_irq assigned_irq;
2002
2003 r = -EFAULT;
2004 if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
2005 goto out;
2006 r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
2007 if (r)
2008 goto out;
2009 break;
2010 }
2011#endif
Weidong Han0a920352008-12-02 21:24:23 +08002012#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
2013 case KVM_DEASSIGN_PCI_DEVICE: {
2014 struct kvm_assigned_pci_dev assigned_dev;
2015
2016 r = -EFAULT;
2017 if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
2018 goto out;
2019 r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
2020 if (r)
2021 goto out;
2022 break;
2023 }
2024#endif
Avi Kivity399ec802008-11-19 13:58:46 +02002025#ifdef KVM_CAP_IRQ_ROUTING
2026 case KVM_SET_GSI_ROUTING: {
2027 struct kvm_irq_routing routing;
2028 struct kvm_irq_routing __user *urouting;
2029 struct kvm_irq_routing_entry *entries;
2030
2031 r = -EFAULT;
2032 if (copy_from_user(&routing, argp, sizeof(routing)))
2033 goto out;
2034 r = -EINVAL;
2035 if (routing.nr >= KVM_MAX_IRQ_ROUTES)
2036 goto out;
2037 if (routing.flags)
2038 goto out;
2039 r = -ENOMEM;
2040 entries = vmalloc(routing.nr * sizeof(*entries));
2041 if (!entries)
2042 goto out;
2043 r = -EFAULT;
2044 urouting = argp;
2045 if (copy_from_user(entries, urouting->entries,
2046 routing.nr * sizeof(*entries)))
2047 goto out_free_irq_routing;
2048 r = kvm_set_irq_routing(kvm, entries, routing.nr,
2049 routing.flags);
2050 out_free_irq_routing:
2051 vfree(entries);
2052 break;
2053 }
Sheng Yangc1e01512009-02-25 17:22:26 +08002054#ifdef __KVM_HAVE_MSIX
2055 case KVM_ASSIGN_SET_MSIX_NR: {
2056 struct kvm_assigned_msix_nr entry_nr;
2057 r = -EFAULT;
2058 if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
2059 goto out;
2060 r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
2061 if (r)
2062 goto out;
2063 break;
2064 }
2065 case KVM_ASSIGN_SET_MSIX_ENTRY: {
2066 struct kvm_assigned_msix_entry entry;
2067 r = -EFAULT;
2068 if (copy_from_user(&entry, argp, sizeof entry))
2069 goto out;
2070 r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
2071 if (r)
2072 goto out;
2073 break;
2074 }
Avi Kivity399ec802008-11-19 13:58:46 +02002075#endif
Sheng Yangc1e01512009-02-25 17:22:26 +08002076#endif /* KVM_CAP_IRQ_ROUTING */
Avi Kivityf17abe92007-02-21 19:28:04 +02002077 default:
Carsten Otte1fe779f2007-10-29 16:08:35 +01002078 r = kvm_arch_vm_ioctl(filp, ioctl, arg);
Avi Kivityf17abe92007-02-21 19:28:04 +02002079 }
2080out:
2081 return r;
2082}
2083
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11002084static int kvm_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
Avi Kivityf17abe92007-02-21 19:28:04 +02002085{
Marcelo Tosatti777b3f42008-09-16 20:54:46 -03002086 struct page *page[1];
2087 unsigned long addr;
2088 int npages;
2089 gfn_t gfn = vmf->pgoff;
Avi Kivityf17abe92007-02-21 19:28:04 +02002090 struct kvm *kvm = vma->vm_file->private_data;
Avi Kivityf17abe92007-02-21 19:28:04 +02002091
Marcelo Tosatti777b3f42008-09-16 20:54:46 -03002092 addr = gfn_to_hva(kvm, gfn);
2093 if (kvm_is_error_hva(addr))
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11002094 return VM_FAULT_SIGBUS;
Marcelo Tosatti777b3f42008-09-16 20:54:46 -03002095
2096 npages = get_user_pages(current, current->mm, addr, 1, 1, 0, page,
2097 NULL);
2098 if (unlikely(npages != 1))
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11002099 return VM_FAULT_SIGBUS;
Marcelo Tosatti777b3f42008-09-16 20:54:46 -03002100
2101 vmf->page = page[0];
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11002102 return 0;
Avi Kivityf17abe92007-02-21 19:28:04 +02002103}
2104
2105static struct vm_operations_struct kvm_vm_vm_ops = {
npiggin@suse.dee4a533a2007-12-05 18:15:52 +11002106 .fault = kvm_vm_fault,
Avi Kivityf17abe92007-02-21 19:28:04 +02002107};
2108
2109static int kvm_vm_mmap(struct file *file, struct vm_area_struct *vma)
2110{
2111 vma->vm_ops = &kvm_vm_vm_ops;
2112 return 0;
2113}
2114
Christian Borntraeger3d3aab12008-12-02 11:17:32 +01002115static struct file_operations kvm_vm_fops = {
Avi Kivityf17abe92007-02-21 19:28:04 +02002116 .release = kvm_vm_release,
2117 .unlocked_ioctl = kvm_vm_ioctl,
2118 .compat_ioctl = kvm_vm_ioctl,
2119 .mmap = kvm_vm_mmap,
2120};
2121
2122static int kvm_dev_ioctl_create_vm(void)
2123{
Al Viro2030a422008-02-23 06:46:49 -05002124 int fd;
Avi Kivityf17abe92007-02-21 19:28:04 +02002125 struct kvm *kvm;
2126
Avi Kivityf17abe92007-02-21 19:28:04 +02002127 kvm = kvm_create_vm();
Avi Kivityd6d28162007-06-28 08:38:16 -04002128 if (IS_ERR(kvm))
2129 return PTR_ERR(kvm);
Ulrich Drepper7d9dbca2008-07-23 21:29:22 -07002130 fd = anon_inode_getfd("kvm-vm", &kvm_vm_fops, kvm, 0);
Al Viro2030a422008-02-23 06:46:49 -05002131 if (fd < 0)
Al Viro66c0b392008-04-19 20:33:56 +01002132 kvm_put_kvm(kvm);
Avi Kivityf17abe92007-02-21 19:28:04 +02002133
Avi Kivityf17abe92007-02-21 19:28:04 +02002134 return fd;
Avi Kivityf17abe92007-02-21 19:28:04 +02002135}
2136
Avi Kivity1a811b62008-12-08 18:25:27 +02002137static long kvm_dev_ioctl_check_extension_generic(long arg)
2138{
2139 switch (arg) {
Avi Kivityca9edae2008-12-08 18:29:29 +02002140 case KVM_CAP_USER_MEMORY:
Avi Kivity1a811b62008-12-08 18:25:27 +02002141 case KVM_CAP_DESTROY_MEMORY_REGION_WORKS:
Jan Kiszka4cd481f2009-04-13 11:59:32 +02002142 case KVM_CAP_JOIN_MEMORY_REGIONS_WORKS:
Avi Kivity1a811b62008-12-08 18:25:27 +02002143 return 1;
Avi Kivity399ec802008-11-19 13:58:46 +02002144#ifdef CONFIG_HAVE_KVM_IRQCHIP
2145 case KVM_CAP_IRQ_ROUTING:
Sheng Yang36463142009-03-16 16:33:43 +08002146 return KVM_MAX_IRQ_ROUTES;
Avi Kivity399ec802008-11-19 13:58:46 +02002147#endif
Avi Kivity1a811b62008-12-08 18:25:27 +02002148 default:
2149 break;
2150 }
2151 return kvm_dev_ioctl_check_extension(arg);
2152}
2153
Avi Kivityf17abe92007-02-21 19:28:04 +02002154static long kvm_dev_ioctl(struct file *filp,
2155 unsigned int ioctl, unsigned long arg)
2156{
Avi Kivity07c45a32007-03-07 13:05:38 +02002157 long r = -EINVAL;
Avi Kivityf17abe92007-02-21 19:28:04 +02002158
2159 switch (ioctl) {
2160 case KVM_GET_API_VERSION:
Avi Kivityf0fe5102007-03-07 13:11:17 +02002161 r = -EINVAL;
2162 if (arg)
2163 goto out;
Avi Kivityf17abe92007-02-21 19:28:04 +02002164 r = KVM_API_VERSION;
2165 break;
2166 case KVM_CREATE_VM:
Avi Kivityf0fe5102007-03-07 13:11:17 +02002167 r = -EINVAL;
2168 if (arg)
2169 goto out;
Avi Kivityf17abe92007-02-21 19:28:04 +02002170 r = kvm_dev_ioctl_create_vm();
2171 break;
Zhang Xiantao018d00d2007-11-15 23:07:47 +08002172 case KVM_CHECK_EXTENSION:
Avi Kivity1a811b62008-12-08 18:25:27 +02002173 r = kvm_dev_ioctl_check_extension_generic(arg);
Avi Kivity5d308f42007-03-01 17:56:20 +02002174 break;
Avi Kivity07c45a32007-03-07 13:05:38 +02002175 case KVM_GET_VCPU_MMAP_SIZE:
2176 r = -EINVAL;
2177 if (arg)
2178 goto out;
Avi Kivityadb1ff42008-01-24 15:13:08 +02002179 r = PAGE_SIZE; /* struct kvm_run */
2180#ifdef CONFIG_X86
2181 r += PAGE_SIZE; /* pio data page */
2182#endif
Laurent Vivier5f94c172008-05-30 16:05:54 +02002183#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
2184 r += PAGE_SIZE; /* coalesced mmio ring page */
2185#endif
Avi Kivity07c45a32007-03-07 13:05:38 +02002186 break;
Feng(Eric) Liud4c9ff22008-04-10 08:47:53 -04002187 case KVM_TRACE_ENABLE:
2188 case KVM_TRACE_PAUSE:
2189 case KVM_TRACE_DISABLE:
2190 r = kvm_trace_ioctl(ioctl, arg);
2191 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002192 default:
Carsten Otte043405e2007-10-10 17:16:19 +02002193 return kvm_arch_dev_ioctl(filp, ioctl, arg);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002194 }
2195out:
2196 return r;
2197}
2198
Avi Kivity6aa8b732006-12-10 02:21:36 -08002199static struct file_operations kvm_chardev_ops = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08002200 .unlocked_ioctl = kvm_dev_ioctl,
2201 .compat_ioctl = kvm_dev_ioctl,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002202};
2203
2204static struct miscdevice kvm_dev = {
Avi Kivitybbe44322007-03-04 13:27:36 +02002205 KVM_MINOR,
Avi Kivity6aa8b732006-12-10 02:21:36 -08002206 "kvm",
2207 &kvm_chardev_ops,
2208};
2209
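/*
 * Enable/disable the hardware virtualization extensions on the current cpu.
 * cpus_hardware_enabled tracks which cpus are already enabled so that the
 * cpu hotplug, reboot, suspend and module init/exit paths toggle each cpu
 * exactly once.
 */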
Avi Kivity1b6c0162007-05-24 13:03:52 +03002210static void hardware_enable(void *junk)
2211{
2212 int cpu = raw_smp_processor_id();
2213
Rusty Russell7f59f492008-12-07 21:25:45 +10302214 if (cpumask_test_cpu(cpu, cpus_hardware_enabled))
Avi Kivity1b6c0162007-05-24 13:03:52 +03002215 return;
Rusty Russell7f59f492008-12-07 21:25:45 +10302216 cpumask_set_cpu(cpu, cpus_hardware_enabled);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08002217 kvm_arch_hardware_enable(NULL);
Avi Kivity1b6c0162007-05-24 13:03:52 +03002218}
2219
2220static void hardware_disable(void *junk)
2221{
2222 int cpu = raw_smp_processor_id();
2223
Rusty Russell7f59f492008-12-07 21:25:45 +10302224 if (!cpumask_test_cpu(cpu, cpus_hardware_enabled))
Avi Kivity1b6c0162007-05-24 13:03:52 +03002225 return;
Rusty Russell7f59f492008-12-07 21:25:45 +10302226 cpumask_clear_cpu(cpu, cpus_hardware_enabled);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08002227 kvm_arch_hardware_disable(NULL);
Avi Kivity1b6c0162007-05-24 13:03:52 +03002228}
2229
Avi Kivity774c47f2007-02-12 00:54:47 -08002230static int kvm_cpu_hotplug(struct notifier_block *notifier, unsigned long val,
2231 void *v)
2232{
2233 int cpu = (long)v;
2234
Avi Kivity1a6f4d72007-11-11 18:37:32 +02002235 val &= ~CPU_TASKS_FROZEN;
Avi Kivity774c47f2007-02-12 00:54:47 -08002236 switch (val) {
Avi Kivitycec9ad22007-05-24 13:11:41 +03002237 case CPU_DYING:
Avi Kivity6ec8a852007-08-19 15:57:26 +03002238 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2239 cpu);
2240 hardware_disable(NULL);
2241 break;
Avi Kivity774c47f2007-02-12 00:54:47 -08002242 case CPU_UP_CANCELED:
Jeremy Katz43934a32007-02-19 14:37:46 +02002243 printk(KERN_INFO "kvm: disabling virtualization on CPU%d\n",
2244 cpu);
Jens Axboe8691e5a2008-06-06 11:18:06 +02002245 smp_call_function_single(cpu, hardware_disable, NULL, 1);
Avi Kivity774c47f2007-02-12 00:54:47 -08002246 break;
Jeremy Katz43934a32007-02-19 14:37:46 +02002247 case CPU_ONLINE:
2248 printk(KERN_INFO "kvm: enabling virtualization on CPU%d\n",
2249 cpu);
Jens Axboe8691e5a2008-06-06 11:18:06 +02002250 smp_call_function_single(cpu, hardware_enable, NULL, 1);
Avi Kivity774c47f2007-02-12 00:54:47 -08002251 break;
2252 }
2253 return NOTIFY_OK;
2254}
2255
Avi Kivity4ecac3f2008-05-13 13:23:38 +03002256
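/*
 * Landing point for faults taken on hardware-virtualization instructions:
 * if the reboot notifier below has already disabled virtualization
 * (kvm_rebooting is set), just spin until the machine resets; a fault at
 * any other time is a genuine bug.
 */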
2257asmlinkage void kvm_handle_fault_on_reboot(void)
2258{
2259 if (kvm_rebooting)
2260 /* spin while reset goes on */
2261 while (true)
2262 ;
2263 /* Fault while not rebooting. We want the trace. */
2264 BUG();
2265}
2266EXPORT_SYMBOL_GPL(kvm_handle_fault_on_reboot);
2267
Rusty Russell9a2b85c2007-07-17 23:17:55 +10002268static int kvm_reboot(struct notifier_block *notifier, unsigned long val,
Mike Dayd77c26f2007-10-08 09:02:08 -04002269 void *v)
Rusty Russell9a2b85c2007-07-17 23:17:55 +10002270{
2271 if (val == SYS_RESTART) {
2272 /*
2273 * Some (well, at least mine) BIOSes hang on reboot if
2274 * in vmx root mode.
2275 */
2276 printk(KERN_INFO "kvm: exiting hardware virtualization\n");
Avi Kivity4ecac3f2008-05-13 13:23:38 +03002277 kvm_rebooting = true;
Jens Axboe15c8b6c2008-05-09 09:39:44 +02002278 on_each_cpu(hardware_disable, NULL, 1);
Rusty Russell9a2b85c2007-07-17 23:17:55 +10002279 }
2280 return NOTIFY_OK;
2281}
2282
2283static struct notifier_block kvm_reboot_notifier = {
2284 .notifier_call = kvm_reboot,
2285 .priority = 0,
2286};
2287
Gregory Haskins2eeb2e92007-05-31 14:08:53 -04002288void kvm_io_bus_init(struct kvm_io_bus *bus)
2289{
2290 memset(bus, 0, sizeof(*bus));
2291}
2292
2293void kvm_io_bus_destroy(struct kvm_io_bus *bus)
2294{
2295 int i;
2296
2297 for (i = 0; i < bus->dev_count; i++) {
2298 struct kvm_io_device *pos = bus->devs[i];
2299
2300 kvm_iodevice_destructor(pos);
2301 }
2302}
2303
Laurent Vivier92760492008-05-30 16:05:53 +02002304struct kvm_io_device *kvm_io_bus_find_dev(struct kvm_io_bus *bus,
2305 gpa_t addr, int len, int is_write)
Gregory Haskins2eeb2e92007-05-31 14:08:53 -04002306{
2307 int i;
2308
2309 for (i = 0; i < bus->dev_count; i++) {
2310 struct kvm_io_device *pos = bus->devs[i];
2311
Laurent Vivier92760492008-05-30 16:05:53 +02002312 if (pos->in_range(pos, addr, len, is_write))
Gregory Haskins2eeb2e92007-05-31 14:08:53 -04002313 return pos;
2314 }
2315
2316 return NULL;
2317}
2318
2319void kvm_io_bus_register_dev(struct kvm_io_bus *bus, struct kvm_io_device *dev)
2320{
2321 BUG_ON(bus->dev_count > (NR_IOBUS_DEVS-1));
2322
2323 bus->devs[bus->dev_count++] = dev;
2324}
2325
Avi Kivity774c47f2007-02-12 00:54:47 -08002326static struct notifier_block kvm_cpu_notifier = {
2327 .notifier_call = kvm_cpu_hotplug,
2328 .priority = 20, /* must be > scheduler priority */
2329};
2330
Christoph Hellwig8b88b092008-02-08 04:20:26 -08002331static int vm_stat_get(void *_offset, u64 *val)
Avi Kivityba1389b2007-11-18 16:24:12 +02002332{
2333 unsigned offset = (long)_offset;
Avi Kivityba1389b2007-11-18 16:24:12 +02002334 struct kvm *kvm;
2335
Christoph Hellwig8b88b092008-02-08 04:20:26 -08002336 *val = 0;
Avi Kivityba1389b2007-11-18 16:24:12 +02002337 spin_lock(&kvm_lock);
2338 list_for_each_entry(kvm, &vm_list, vm_list)
Christoph Hellwig8b88b092008-02-08 04:20:26 -08002339 *val += *(u32 *)((void *)kvm + offset);
Avi Kivityba1389b2007-11-18 16:24:12 +02002340 spin_unlock(&kvm_lock);
Christoph Hellwig8b88b092008-02-08 04:20:26 -08002341 return 0;
Avi Kivityba1389b2007-11-18 16:24:12 +02002342}
2343
2344DEFINE_SIMPLE_ATTRIBUTE(vm_stat_fops, vm_stat_get, NULL, "%llu\n");
2345
Christoph Hellwig8b88b092008-02-08 04:20:26 -08002346static int vcpu_stat_get(void *_offset, u64 *val)
Avi Kivity1165f5f2007-04-19 17:27:43 +03002347{
2348 unsigned offset = (long)_offset;
Avi Kivity1165f5f2007-04-19 17:27:43 +03002349 struct kvm *kvm;
2350 struct kvm_vcpu *vcpu;
2351 int i;
2352
Christoph Hellwig8b88b092008-02-08 04:20:26 -08002353 *val = 0;
Avi Kivity1165f5f2007-04-19 17:27:43 +03002354 spin_lock(&kvm_lock);
2355 list_for_each_entry(kvm, &vm_list, vm_list)
2356 for (i = 0; i < KVM_MAX_VCPUS; ++i) {
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002357 vcpu = kvm->vcpus[i];
2358 if (vcpu)
Christoph Hellwig8b88b092008-02-08 04:20:26 -08002359 *val += *(u32 *)((void *)vcpu + offset);
Avi Kivity1165f5f2007-04-19 17:27:43 +03002360 }
2361 spin_unlock(&kvm_lock);
Christoph Hellwig8b88b092008-02-08 04:20:26 -08002362 return 0;
Avi Kivity1165f5f2007-04-19 17:27:43 +03002363}
2364
Avi Kivityba1389b2007-11-18 16:24:12 +02002365DEFINE_SIMPLE_ATTRIBUTE(vcpu_stat_fops, vcpu_stat_get, NULL, "%llu\n");
2366
2367static struct file_operations *stat_fops[] = {
2368 [KVM_STAT_VCPU] = &vcpu_stat_fops,
2369 [KVM_STAT_VM] = &vm_stat_fops,
2370};
Avi Kivity1165f5f2007-04-19 17:27:43 +03002371
Zhang Xiantaoa16b0432007-11-16 14:38:21 +08002372static void kvm_init_debug(void)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002373{
2374 struct kvm_stats_debugfs_item *p;
2375
Hollis Blanchard76f7c872008-04-15 16:05:42 -05002376 kvm_debugfs_dir = debugfs_create_dir("kvm", NULL);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002377 for (p = debugfs_entries; p->name; ++p)
Hollis Blanchard76f7c872008-04-15 16:05:42 -05002378 p->dentry = debugfs_create_file(p->name, 0444, kvm_debugfs_dir,
Avi Kivity1165f5f2007-04-19 17:27:43 +03002379 (void *)(long)p->offset,
Avi Kivityba1389b2007-11-18 16:24:12 +02002380 stat_fops[p->kind]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002381}
2382
2383static void kvm_exit_debug(void)
2384{
2385 struct kvm_stats_debugfs_item *p;
2386
2387 for (p = debugfs_entries; p->name; ++p)
2388 debugfs_remove(p->dentry);
Hollis Blanchard76f7c872008-04-15 16:05:42 -05002389 debugfs_remove(kvm_debugfs_dir);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002390}
2391
Avi Kivity59ae6c62007-02-12 00:54:48 -08002392static int kvm_suspend(struct sys_device *dev, pm_message_t state)
2393{
Avi Kivity4267c412007-05-24 13:09:41 +03002394 hardware_disable(NULL);
Avi Kivity59ae6c62007-02-12 00:54:48 -08002395 return 0;
2396}
2397
2398static int kvm_resume(struct sys_device *dev)
2399{
Avi Kivity4267c412007-05-24 13:09:41 +03002400 hardware_enable(NULL);
Avi Kivity59ae6c62007-02-12 00:54:48 -08002401 return 0;
2402}
2403
2404static struct sysdev_class kvm_sysdev_class = {
Kay Sieversaf5ca3f2007-12-20 02:09:39 +01002405 .name = "kvm",
Avi Kivity59ae6c62007-02-12 00:54:48 -08002406 .suspend = kvm_suspend,
2407 .resume = kvm_resume,
2408};
2409
2410static struct sys_device kvm_sysdev = {
2411 .id = 0,
2412 .cls = &kvm_sysdev_class,
2413};
2414
Izik Eiduscea7bb22007-10-17 19:17:48 +02002415struct page *bad_page;
Anthony Liguori35149e22008-04-02 14:46:56 -05002416pfn_t bad_pfn;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002417
Avi Kivity15ad7142007-07-11 18:17:21 +03002418static inline
2419struct kvm_vcpu *preempt_notifier_to_vcpu(struct preempt_notifier *pn)
2420{
2421 return container_of(pn, struct kvm_vcpu, preempt_notifier);
2422}
2423
2424static void kvm_sched_in(struct preempt_notifier *pn, int cpu)
2425{
2426 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2427
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08002428 kvm_arch_vcpu_load(vcpu, cpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03002429}
2430
2431static void kvm_sched_out(struct preempt_notifier *pn,
2432 struct task_struct *next)
2433{
2434 struct kvm_vcpu *vcpu = preempt_notifier_to_vcpu(pn);
2435
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08002436 kvm_arch_vcpu_put(vcpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03002437}
2438
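/*
 * Common module initialization, invoked by the arch-specific module
 * (e.g. kvm-intel/kvm-amd on x86): create the debugfs entries and arch
 * state, check every online cpu for the required processor features,
 * enable hardware virtualization, register the cpu hotplug, reboot and
 * sysdev hooks, set up the vcpu slab cache and finally expose /dev/kvm.
 */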
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08002439int kvm_init(void *opaque, unsigned int vcpu_size,
Rusty Russellc16f8622007-07-30 21:12:19 +10002440 struct module *module)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002441{
2442 int r;
Yang, Sheng002c7f72007-07-31 14:23:01 +03002443 int cpu;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002444
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002445 kvm_init_debug();
2446
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08002447 r = kvm_arch_init(opaque);
2448 if (r)
Zhang Xiantaod23087842007-11-29 15:35:39 +08002449 goto out_fail;
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002450
2451 bad_page = alloc_page(GFP_KERNEL | __GFP_ZERO);
2452
2453 if (bad_page == NULL) {
2454 r = -ENOMEM;
2455 goto out;
2456 }
2457
Anthony Liguori35149e22008-04-02 14:46:56 -05002458 bad_pfn = page_to_pfn(bad_page);
2459
Avi Kivity8437a612009-06-06 14:52:35 -07002460 if (!zalloc_cpumask_var(&cpus_hardware_enabled, GFP_KERNEL)) {
Rusty Russell7f59f492008-12-07 21:25:45 +10302461 r = -ENOMEM;
2462 goto out_free_0;
2463 }
Avi Kivitya4c03642009-06-06 12:34:39 +03002464 cpumask_clear(cpus_hardware_enabled);
Rusty Russell7f59f492008-12-07 21:25:45 +10302465
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08002466 r = kvm_arch_hardware_setup();
Avi Kivity6aa8b732006-12-10 02:21:36 -08002467 if (r < 0)
Rusty Russell7f59f492008-12-07 21:25:45 +10302468 goto out_free_0a;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002469
Yang, Sheng002c7f72007-07-31 14:23:01 +03002470 for_each_online_cpu(cpu) {
2471 smp_call_function_single(cpu,
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08002472 kvm_arch_check_processor_compat,
Jens Axboe8691e5a2008-06-06 11:18:06 +02002473 &r, 1);
Yang, Sheng002c7f72007-07-31 14:23:01 +03002474 if (r < 0)
Zhang Xiantaod23087842007-11-29 15:35:39 +08002475 goto out_free_1;
Yang, Sheng002c7f72007-07-31 14:23:01 +03002476 }
2477
Jens Axboe15c8b6c2008-05-09 09:39:44 +02002478 on_each_cpu(hardware_enable, NULL, 1);
Avi Kivity774c47f2007-02-12 00:54:47 -08002479 r = register_cpu_notifier(&kvm_cpu_notifier);
2480 if (r)
Zhang Xiantaod23087842007-11-29 15:35:39 +08002481 goto out_free_2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002482 register_reboot_notifier(&kvm_reboot_notifier);
2483
Avi Kivity59ae6c62007-02-12 00:54:48 -08002484 r = sysdev_class_register(&kvm_sysdev_class);
2485 if (r)
Zhang Xiantaod23087842007-11-29 15:35:39 +08002486 goto out_free_3;
Avi Kivity59ae6c62007-02-12 00:54:48 -08002487
2488 r = sysdev_register(&kvm_sysdev);
2489 if (r)
Zhang Xiantaod23087842007-11-29 15:35:39 +08002490 goto out_free_4;
Avi Kivity59ae6c62007-02-12 00:54:48 -08002491
Rusty Russellc16f8622007-07-30 21:12:19 +10002492 /* A kmem cache lets us meet the alignment requirements of fx_save. */
2493 kvm_vcpu_cache = kmem_cache_create("kvm_vcpu", vcpu_size,
Joe Perches56919c52007-11-12 20:06:51 -08002494 __alignof__(struct kvm_vcpu),
2495 0, NULL);
Rusty Russellc16f8622007-07-30 21:12:19 +10002496 if (!kvm_vcpu_cache) {
2497 r = -ENOMEM;
Zhang Xiantaod23087842007-11-29 15:35:39 +08002498 goto out_free_5;
Rusty Russellc16f8622007-07-30 21:12:19 +10002499 }
2500
Avi Kivity6aa8b732006-12-10 02:21:36 -08002501 kvm_chardev_ops.owner = module;
Christian Borntraeger3d3aab12008-12-02 11:17:32 +01002502 kvm_vm_fops.owner = module;
2503 kvm_vcpu_fops.owner = module;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002504
2505 r = misc_register(&kvm_dev);
2506 if (r) {
Mike Dayd77c26f2007-10-08 09:02:08 -04002507 printk(KERN_ERR "kvm: misc device register failed\n");
Avi Kivity6aa8b732006-12-10 02:21:36 -08002508 goto out_free;
2509 }
2510
Avi Kivity15ad7142007-07-11 18:17:21 +03002511 kvm_preempt_ops.sched_in = kvm_sched_in;
2512 kvm_preempt_ops.sched_out = kvm_sched_out;
Sheng Yang5319c662008-11-24 14:32:57 +08002513#ifndef CONFIG_X86
2514 msi2intx = 0;
2515#endif
Avi Kivity15ad7142007-07-11 18:17:21 +03002516
Avi Kivityc7addb92007-09-16 18:58:32 +02002517 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002518
2519out_free:
Rusty Russellc16f8622007-07-30 21:12:19 +10002520 kmem_cache_destroy(kvm_vcpu_cache);
Zhang Xiantaod23087842007-11-29 15:35:39 +08002521out_free_5:
Avi Kivity59ae6c62007-02-12 00:54:48 -08002522 sysdev_unregister(&kvm_sysdev);
Zhang Xiantaod23087842007-11-29 15:35:39 +08002523out_free_4:
Avi Kivity59ae6c62007-02-12 00:54:48 -08002524 sysdev_class_unregister(&kvm_sysdev_class);
Zhang Xiantaod23087842007-11-29 15:35:39 +08002525out_free_3:
Avi Kivity6aa8b732006-12-10 02:21:36 -08002526 unregister_reboot_notifier(&kvm_reboot_notifier);
Avi Kivity774c47f2007-02-12 00:54:47 -08002527 unregister_cpu_notifier(&kvm_cpu_notifier);
Zhang Xiantaod23087842007-11-29 15:35:39 +08002528out_free_2:
Jens Axboe15c8b6c2008-05-09 09:39:44 +02002529 on_each_cpu(hardware_disable, NULL, 1);
Zhang Xiantaod23087842007-11-29 15:35:39 +08002530out_free_1:
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08002531 kvm_arch_hardware_unsetup();
Rusty Russell7f59f492008-12-07 21:25:45 +10302532out_free_0a:
2533 free_cpumask_var(cpus_hardware_enabled);
Zhang Xiantaod23087842007-11-29 15:35:39 +08002534out_free_0:
2535 __free_page(bad_page);
Avi Kivityca45aaa2007-03-01 19:21:03 +02002536out:
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08002537 kvm_arch_exit();
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002538 kvm_exit_debug();
Zhang Xiantaod23087842007-11-29 15:35:39 +08002539out_fail:
Avi Kivity6aa8b732006-12-10 02:21:36 -08002540 return r;
2541}
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002542EXPORT_SYMBOL_GPL(kvm_init);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002543
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002544void kvm_exit(void)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002545{
Feng(Eric) Liud4c9ff22008-04-10 08:47:53 -04002546 kvm_trace_cleanup();
Avi Kivity6aa8b732006-12-10 02:21:36 -08002547 misc_deregister(&kvm_dev);
Rusty Russellc16f8622007-07-30 21:12:19 +10002548 kmem_cache_destroy(kvm_vcpu_cache);
Avi Kivity59ae6c62007-02-12 00:54:48 -08002549 sysdev_unregister(&kvm_sysdev);
2550 sysdev_class_unregister(&kvm_sysdev_class);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002551 unregister_reboot_notifier(&kvm_reboot_notifier);
Avi Kivity59ae6c62007-02-12 00:54:48 -08002552 unregister_cpu_notifier(&kvm_cpu_notifier);
Jens Axboe15c8b6c2008-05-09 09:39:44 +02002553 on_each_cpu(hardware_disable, NULL, 1);
Zhang Xiantaoe9b11c12007-11-14 20:38:21 +08002554 kvm_arch_hardware_unsetup();
Zhang Xiantaof8c16bb2007-11-14 20:40:21 +08002555 kvm_arch_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08002556 kvm_exit_debug();
Rusty Russell7f59f492008-12-07 21:25:45 +10302557 free_cpumask_var(cpus_hardware_enabled);
Izik Eiduscea7bb22007-10-17 19:17:48 +02002558 __free_page(bad_page);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002559}
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08002560EXPORT_SYMBOL_GPL(kvm_exit);