/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2010 Red Hat, Inc. and/or its affiliates.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "irq.h"

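/*
 * Look up an assigned device by assigned_dev_id on the per-VM list.
 * Returns the matching entry or NULL.  Every caller in this file holds
 * kvm->lock around the lookup and any use of the result.
 */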
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
                                                      int assigned_dev_id)
{
        struct list_head *ptr;
        struct kvm_assigned_dev_kernel *match;

        list_for_each(ptr, head) {
                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
                if (match->assigned_dev_id == assigned_dev_id)
                        return match;
        }
        return NULL;
}

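/*
 * Translate a host MSI-X interrupt vector into its index within
 * host_msix_entries[].  If no entry matches, a warning is printed and 0
 * (not a negative value) is returned.
 */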
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
                                    *assigned_dev, int irq)
{
        int i, index;
        struct msix_entry *host_msix_entries;

        host_msix_entries = assigned_dev->host_msix_entries;

        index = -1;
        for (i = 0; i < assigned_dev->entries_nr; i++)
                if (irq == host_msix_entries[i].vector) {
                        index = i;
                        break;
                }
        if (index < 0) {
                printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
                return 0;
        }

        return index;
}

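/*
 * Deferred work that actually injects the interrupt into the guest.  For
 * MSI-X it raises every vector that kvm_assigned_dev_intr() flagged as
 * KVM_ASSIGNED_MSIX_PENDING; otherwise it raises the single guest_irq.
 * Running from a workqueue keeps kvm_set_irq() out of hard irq context.
 */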
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
        struct kvm_assigned_dev_kernel *assigned_dev;
        struct kvm *kvm;
        int i;

        assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
                                    interrupt_work);
        kvm = assigned_dev->kvm;

        spin_lock_irq(&assigned_dev->assigned_dev_lock);
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                struct kvm_guest_msix_entry *guest_entries =
                        assigned_dev->guest_msix_entries;
                for (i = 0; i < assigned_dev->entries_nr; i++) {
                        if (!(guest_entries[i].flags &
                                        KVM_ASSIGNED_MSIX_PENDING))
                                continue;
                        guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
                        kvm_set_irq(assigned_dev->kvm,
                                    assigned_dev->irq_source_id,
                                    guest_entries[i].vector, 1);
                }
        } else
                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                            assigned_dev->guest_irq, 1);

        spin_unlock_irq(&assigned_dev->assigned_dev_lock);
}

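/*
 * Host-side interrupt handler for an assigned device.  It records which
 * MSI-X entry fired (if any), schedules interrupt_work to inject the
 * interrupt into the guest, and for guest INTx masks the host line until
 * the guest acks it (re-enabled in kvm_assigned_dev_ack_irq()).
 */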
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
        unsigned long flags;
        struct kvm_assigned_dev_kernel *assigned_dev =
                (struct kvm_assigned_dev_kernel *) dev_id;

        spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                int index = find_index_from_host_irq(assigned_dev, irq);
                if (index < 0)
                        goto out;
                assigned_dev->guest_msix_entries[index].flags |=
                        KVM_ASSIGNED_MSIX_PENDING;
        }

        schedule_work(&assigned_dev->interrupt_work);

        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
                disable_irq_nosync(irq);
                assigned_dev->host_irq_disabled = true;
        }

out:
        spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
        return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_assigned_dev_kernel *dev;
        unsigned long flags;

        if (kian->gsi == -1)
                return;

        dev = container_of(kian, struct kvm_assigned_dev_kernel,
                           ack_notifier);

        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

        /* The guest irq may be shared so this ack may be
         * from another device.
         */
        spin_lock_irqsave(&dev->assigned_dev_lock, flags);
        if (dev->host_irq_disabled) {
                enable_irq(dev->host_irq);
                dev->host_irq_disabled = false;
        }
        spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

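/*
 * Undo the guest side of an irq assignment: drop the ack notifier, release
 * the irq source id and clear the guest irq type bits in irq_requested_type.
 */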
static void deassign_guest_irq(struct kvm *kvm,
                               struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
        assigned_dev->ack_notifier.gsi = -1;

        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        assigned_dev->irq_source_id = -1;
        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* The function implicitly holds the kvm->lock mutex due to cancel_work_sync(). */
static void deassign_host_irq(struct kvm *kvm,
                              struct kvm_assigned_dev_kernel *assigned_dev)
{
        /*
         * In kvm_free_device_irq, cancel_work_sync() returns true if:
         * 1. the work was scheduled and has been cancelled, or
         * 2. the work callback has been executed.
         *
         * The first case guarantees that the irq is disabled and no further
         * events can arrive.  In the second case the irq may have been
         * re-enabled (e.g. for MSI), so we disable it here to prevent
         * further events.
         *
         * Note that this may result in a nested disable if the interrupt
         * type is INTx, but that is fine since we are about to free it.
         *
         * If this function is called as part of VM destruction, make sure
         * the kvm state is still valid at this point, since we may also
         * have to wait for interrupt_work to finish.
         */
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                int i;
                for (i = 0; i < assigned_dev->entries_nr; i++)
                        disable_irq_nosync(assigned_dev->
                                           host_msix_entries[i].vector);

                cancel_work_sync(&assigned_dev->interrupt_work);

                for (i = 0; i < assigned_dev->entries_nr; i++)
                        free_irq(assigned_dev->host_msix_entries[i].vector,
                                 (void *)assigned_dev);

                assigned_dev->entries_nr = 0;
                kfree(assigned_dev->host_msix_entries);
                kfree(assigned_dev->guest_msix_entries);
                pci_disable_msix(assigned_dev->dev);
        } else {
                /* Deal with MSI and INTx */
                disable_irq_nosync(assigned_dev->host_irq);
                cancel_work_sync(&assigned_dev->interrupt_work);

                free_irq(assigned_dev->host_irq, (void *)assigned_dev);

                if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
                        pci_disable_msi(assigned_dev->dev);
        }

        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

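/*
 * Deassign the host and/or guest halves of an irq assignment, as selected
 * by the mask bits in irq_requested_type.  Returns -ENXIO if no irq is
 * currently assigned to the device.
 */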
static int kvm_deassign_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *assigned_dev,
                            unsigned long irq_requested_type)
{
        unsigned long guest_irq_type, host_irq_type;

        if (!irqchip_in_kernel(kvm))
                return -EINVAL;
        /* no irq assignment to deassign */
        if (!assigned_dev->irq_requested_type)
                return -ENXIO;

        host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
        guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

        if (host_irq_type)
                deassign_host_irq(kvm, assigned_dev);
        if (guest_irq_type)
                deassign_guest_irq(kvm, assigned_dev);

        return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

static void kvm_free_assigned_device(struct kvm *kvm,
                                     struct kvm_assigned_dev_kernel
                                     *assigned_dev)
{
        kvm_free_assigned_irq(kvm, assigned_dev);

        pci_reset_function(assigned_dev->dev);

        pci_release_regions(assigned_dev->dev);
        pci_disable_device(assigned_dev->dev);
        pci_dev_put(assigned_dev->dev);

        list_del(&assigned_dev->list);
        kfree(assigned_dev);
}

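/*
 * Release every device still assigned to this VM.  The safe list walk is
 * needed because kvm_free_assigned_device() unlinks and frees each entry;
 * this is intended for VM teardown.
 */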
void kvm_free_all_assigned_devices(struct kvm *kvm)
{
        struct list_head *ptr, *ptr2;
        struct kvm_assigned_dev_kernel *assigned_dev;

        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                assigned_dev = list_entry(ptr,
                                          struct kvm_assigned_dev_kernel,
                                          list);

                kvm_free_assigned_device(kvm, assigned_dev);
        }
}

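/*
 * Host-side irq setup helpers.  Each variant (INTx, MSI, MSI-X) requests
 * the host interrupt(s) with kvm_assigned_dev_intr() as the handler; the
 * MSI and MSI-X variants also enable the corresponding PCI capability.
 */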
static int assigned_device_enable_host_intx(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        dev->host_irq = dev->dev->irq;
        /* Even though this is PCI, we don't want to use shared
         * interrupts. Sharing host devices with guest-assigned devices
         * on the same interrupt line is not a happy situation: there
         * are going to be long delays in accepting, acking, etc.
         */
        if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
                        0, "kvm_assigned_intx_device", (void *)dev))
                return -EIO;
        return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
{
        int r;

        if (!dev->dev->msi_enabled) {
                r = pci_enable_msi(dev->dev);
                if (r)
                        return r;
        }

        dev->host_irq = dev->dev->irq;
        if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
                        "kvm_assigned_msi_device", (void *)dev)) {
                pci_disable_msi(dev->dev);
                return -EIO;
        }

        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        int i, r = -EINVAL;

        /* host_msix_entries and guest_msix_entries should have been
         * initialized */
        if (dev->entries_nr == 0)
                return r;

        r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
        if (r)
                return r;

        for (i = 0; i < dev->entries_nr; i++) {
                r = request_irq(dev->host_msix_entries[i].vector,
                                kvm_assigned_dev_intr, 0,
                                "kvm_assigned_msix_device",
                                (void *)dev);
                if (r)
                        goto err;
        }

        return 0;
err:
        for (i -= 1; i >= 0; i--)
                free_irq(dev->host_msix_entries[i].vector, (void *)dev);
        pci_disable_msix(dev->dev);
        return r;
}

#endif

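/*
 * Guest-side setup helpers.  They only record the guest irq and the ack
 * notifier gsi; for MSI and MSI-X there is no line to ack, so gsi is set to
 * -1 and any stale host_irq_disabled state is cleared.
 */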
static int assigned_device_enable_guest_intx(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = irq->guest_irq;
        return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *dev,
                        struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
                        struct kvm_assigned_dev_kernel *dev,
                        struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

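/*
 * Attach the host interrupt for the requested type (INTx, MSI or MSI-X).
 * Fails with -EEXIST if a host irq type is already assigned; the type bit
 * is recorded in irq_requested_type only on success.
 */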
static int assign_host_irq(struct kvm *kvm,
                           struct kvm_assigned_dev_kernel *dev,
                           __u32 host_irq_type)
{
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
                return r;

        switch (host_irq_type) {
        case KVM_DEV_IRQ_HOST_INTX:
                r = assigned_device_enable_host_intx(kvm, dev);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_HOST_MSI:
                r = assigned_device_enable_host_msi(kvm, dev);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_HOST_MSIX:
                r = assigned_device_enable_host_msix(kvm, dev);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r)
                dev->irq_requested_type |= host_irq_type;

        return r;
}

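/*
 * Attach the guest interrupt for the requested type.  Allocates an irq
 * source id, calls the matching guest-side helper and registers the ack
 * notifier on success; the source id is released again on failure.
 */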
static int assign_guest_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *dev,
                            struct kvm_assigned_irq *irq,
                            unsigned long guest_irq_type)
{
        int id;
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
                return r;

        id = kvm_request_irq_source_id(kvm);
        if (id < 0)
                return id;

        dev->irq_source_id = id;

        switch (guest_irq_type) {
        case KVM_DEV_IRQ_GUEST_INTX:
                r = assigned_device_enable_guest_intx(kvm, dev, irq);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_GUEST_MSI:
                r = assigned_device_enable_guest_msi(kvm, dev, irq);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_GUEST_MSIX:
                r = assigned_device_enable_guest_msix(kvm, dev, irq);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r) {
                dev->irq_requested_type |= guest_irq_type;
                kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
        } else
                kvm_free_irq_source_id(kvm, dev->irq_source_id);

        return r;
}

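/*
 * KVM_ASSIGN_DEV_IRQ handler.  A single call may set at most one host irq
 * type and one guest irq type (enforced by the hweight_long() checks
 * below); the host side is configured before the guest side.
 */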
/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq *assigned_irq)
{
        int r = -EINVAL;
        struct kvm_assigned_dev_kernel *match;
        unsigned long host_irq_type, guest_irq_type;

        if (!irqchip_in_kernel(kvm))
                return r;

        mutex_lock(&kvm->lock);
        r = -ENODEV;
        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
        guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

        r = -EINVAL;
        /* can only assign one type at a time */
        if (hweight_long(host_irq_type) > 1)
                goto out;
        if (hweight_long(guest_irq_type) > 1)
                goto out;
        if (host_irq_type == 0 && guest_irq_type == 0)
                goto out;

        r = 0;
        if (host_irq_type)
                r = assign_host_irq(kvm, match, host_irq_type);
        if (r)
                goto out;

        if (guest_irq_type)
                r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
                                         struct kvm_assigned_irq
                                         *assigned_irq)
{
        int r = -ENODEV;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

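/*
 * KVM_ASSIGN_PCI_DEVICE handler: look up the PCI function by
 * segment/bus/devfn, enable it, claim its regions, reset it, add the
 * bookkeeping entry to kvm->arch.assigned_dev_head and, when
 * KVM_DEV_ASSIGN_ENABLE_IOMMU is set, map the guest and attach the device
 * to the IOMMU domain.
 */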
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;

        mutex_lock(&kvm->lock);
        idx = srcu_read_lock(&kvm->srcu);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (match) {
                /* device already assigned */
                r = -EEXIST;
                goto out;
        }

        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
        if (match == NULL) {
                printk(KERN_INFO "%s: Couldn't allocate memory\n",
                       __func__);
                r = -ENOMEM;
                goto out;
        }
        dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
                                          assigned_dev->busnr,
                                          assigned_dev->devfn);
        if (!dev) {
                printk(KERN_INFO "%s: host device not found\n", __func__);
                r = -EINVAL;
                goto out_free;
        }
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
                goto out_put;
        }
        r = pci_request_regions(dev, "kvm_assigned_device");
        if (r) {
                printk(KERN_INFO "%s: Could not get access to device regions\n",
                       __func__);
                goto out_disable;
        }

        pci_reset_function(dev);

        match->assigned_dev_id = assigned_dev->assigned_dev_id;
        match->host_segnr = assigned_dev->segnr;
        match->host_busnr = assigned_dev->busnr;
        match->host_devfn = assigned_dev->devfn;
        match->flags = assigned_dev->flags;
        match->dev = dev;
        spin_lock_init(&match->assigned_dev_lock);
        match->irq_source_id = -1;
        match->kvm = kvm;
        match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
        INIT_WORK(&match->interrupt_work,
                  kvm_assigned_dev_interrupt_work_handler);

        list_add(&match->list, &kvm->arch.assigned_dev_head);

        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
                if (!kvm->arch.iommu_domain) {
                        r = kvm_iommu_map_guest(kvm);
                        if (r)
                                goto out_list_del;
                }
                r = kvm_assign_device(kvm, match);
                if (r)
                        goto out_list_del;
        }

out:
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
out_list_del:
        list_del(&match->list);
        pci_release_regions(dev);
out_disable:
        pci_disable_device(dev);
out_put:
        pci_dev_put(dev);
out_free:
        kfree(match);
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                                        struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (!match) {
                printk(KERN_INFO "%s: device hasn't been assigned before, "
                       "so cannot be deassigned\n", __func__);
                r = -EINVAL;
                goto out;
        }

        if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
                kvm_deassign_device(kvm, match);

        kvm_free_assigned_device(kvm, match);

out:
        mutex_unlock(&kvm->lock);
        return r;
}


#ifdef __KVM_HAVE_MSIX
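/*
 * MSI-X configuration ioctls: KVM_ASSIGN_SET_MSIX_NR sizes the host/guest
 * entry arrays (once per device, up to KVM_MAX_MSIX_PER_DEV - 1 entries);
 * KVM_ASSIGN_SET_MSIX_ENTRY then fills in one entry/gsi pair per call.
 */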
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
                                    struct kvm_assigned_msix_nr *entry_nr)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                     entry_nr->assigned_dev_id);
        if (!adev) {
                r = -EINVAL;
                goto msix_nr_out;
        }

        if (adev->entries_nr == 0) {
                adev->entries_nr = entry_nr->entry_nr;
                if (adev->entries_nr == 0 ||
                    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
                        r = -EINVAL;
                        goto msix_nr_out;
                }

                adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
                                                  entry_nr->entry_nr,
                                                  GFP_KERNEL);
                if (!adev->host_msix_entries) {
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
                adev->guest_msix_entries = kzalloc(
                                sizeof(struct kvm_guest_msix_entry) *
                                entry_nr->entry_nr, GFP_KERNEL);
                if (!adev->guest_msix_entries) {
                        kfree(adev->host_msix_entries);
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
        } else /* Setting the MSI-X entry count twice is not allowed */
                r = -EINVAL;
msix_nr_out:
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
                                       struct kvm_assigned_msix_entry *entry)
{
        int r = 0, i;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                     entry->assigned_dev_id);

        if (!adev) {
                r = -EINVAL;
                goto msix_entry_out;
        }

        for (i = 0; i < adev->entries_nr; i++)
                if (adev->guest_msix_entries[i].vector == 0 ||
                    adev->guest_msix_entries[i].entry == entry->entry) {
                        adev->guest_msix_entries[i].entry = entry->entry;
                        adev->guest_msix_entries[i].vector = entry->gsi;
                        adev->host_msix_entries[i].entry = entry->entry;
                        break;
                }
        if (i == adev->entries_nr) {
                r = -ENOSPC;
                goto msix_entry_out;
        }

msix_entry_out:
        mutex_unlock(&kvm->lock);

        return r;
}
#endif

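/*
 * Dispatcher for the device-assignment related VM ioctls.  A userspace
 * caller would typically assign the device first and then its irq.  A
 * minimal sketch follows (illustrative only; the vm_fd setup and the exact
 * field values are assumptions, not taken from this file):
 *
 *      struct kvm_assigned_pci_dev dev = {
 *              .assigned_dev_id = 42,
 *              .segnr = 0, .busnr = 1, .devfn = PCI_DEVFN(0, 0),
 *              .flags = KVM_DEV_ASSIGN_ENABLE_IOMMU,
 *      };
 *      struct kvm_assigned_irq irq = {
 *              .assigned_dev_id = 42,
 *              .guest_irq = 10,
 *              .flags = KVM_DEV_IRQ_HOST_INTX | KVM_DEV_IRQ_GUEST_INTX,
 *      };
 *      ioctl(vm_fd, KVM_ASSIGN_PCI_DEVICE, &dev);
 *      ioctl(vm_fd, KVM_ASSIGN_DEV_IRQ, &irq);
 */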
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        int r = -ENOTTY;

        switch (ioctl) {
        case KVM_ASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_IRQ: {
                r = -EOPNOTSUPP;
                break;
        }
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
        case KVM_ASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
        case KVM_DEASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
        case KVM_DEASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
#endif
#ifdef KVM_CAP_IRQ_ROUTING
        case KVM_SET_GSI_ROUTING: {
                struct kvm_irq_routing routing;
                struct kvm_irq_routing __user *urouting;
                struct kvm_irq_routing_entry *entries;

                r = -EFAULT;
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
                if (routing.nr >= KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
                r = -ENOMEM;
                entries = vmalloc(routing.nr * sizeof(*entries));
                if (!entries)
                        goto out;
                r = -EFAULT;
                urouting = argp;
                if (copy_from_user(entries, urouting->entries,
                                   routing.nr * sizeof(*entries)))
                        goto out_free_irq_routing;
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
        out_free_irq_routing:
                vfree(entries);
                break;
        }
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
        case KVM_ASSIGN_SET_MSIX_NR: {
                struct kvm_assigned_msix_nr entry_nr;
                r = -EFAULT;
                if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
                        goto out;
                r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_SET_MSIX_ENTRY: {
                struct kvm_assigned_msix_entry entry;
                r = -EFAULT;
                if (copy_from_user(&entry, argp, sizeof entry))
                        goto out;
                r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
                if (r)
                        goto out;
                break;
        }
#endif
        }
out:
        return r;
}