/*
 * Kernel-based Virtual Machine - device assignment support
 *
 * Copyright (C) 2006-9 Red Hat, Inc
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/errno.h>
#include <linux/spinlock.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include "irq.h"

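/*
 * Look up an assigned device by its user-supplied device id. Callers
 * hold kvm->lock to keep the list stable; returns NULL if no match is
 * found.
 */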
static struct kvm_assigned_dev_kernel *kvm_find_assigned_dev(struct list_head *head,
                                                             int assigned_dev_id)
{
        struct list_head *ptr;
        struct kvm_assigned_dev_kernel *match;

        list_for_each(ptr, head) {
                match = list_entry(ptr, struct kvm_assigned_dev_kernel, list);
                if (match->assigned_dev_id == assigned_dev_id)
                        return match;
        }
        return NULL;
}

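/*
 * Map a host irq number back to its index in the device's MSI-X entry
 * array. Returns 0 (with a warning) if the vector is not found.
 */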
static int find_index_from_host_irq(struct kvm_assigned_dev_kernel
                                    *assigned_dev, int irq)
{
        int i, index;
        struct msix_entry *host_msix_entries;

        host_msix_entries = assigned_dev->host_msix_entries;

        index = -1;
        for (i = 0; i < assigned_dev->entries_nr; i++)
                if (irq == host_msix_entries[i].vector) {
                        index = i;
                        break;
                }
        if (index < 0) {
                printk(KERN_WARNING "Failed to find correlated MSI-X entry!\n");
                return 0;
        }

        return index;
}

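/*
 * Deferred-work handler: injects the pending interrupt(s) into the
 * guest. For MSI-X devices, every entry flagged as pending is raised;
 * otherwise the single configured guest_irq is raised.
 */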
static void kvm_assigned_dev_interrupt_work_handler(struct work_struct *work)
{
        struct kvm_assigned_dev_kernel *assigned_dev;
        struct kvm *kvm;
        int i;

        assigned_dev = container_of(work, struct kvm_assigned_dev_kernel,
                                    interrupt_work);
        kvm = assigned_dev->kvm;

        spin_lock_irq(&assigned_dev->assigned_dev_lock);
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                struct kvm_guest_msix_entry *guest_entries =
                        assigned_dev->guest_msix_entries;
                for (i = 0; i < assigned_dev->entries_nr; i++) {
                        if (!(guest_entries[i].flags &
                                        KVM_ASSIGNED_MSIX_PENDING))
                                continue;
                        guest_entries[i].flags &= ~KVM_ASSIGNED_MSIX_PENDING;
                        kvm_set_irq(assigned_dev->kvm,
                                    assigned_dev->irq_source_id,
                                    guest_entries[i].vector, 1);
                }
        } else
                kvm_set_irq(assigned_dev->kvm, assigned_dev->irq_source_id,
                            assigned_dev->guest_irq, 1);

        spin_unlock_irq(&assigned_dev->assigned_dev_lock);
}

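/*
 * Host interrupt handler. It only records which MSI-X entry fired (if
 * any) and schedules the work item above; if the guest interrupt type
 * is INTx, the host line is also masked until the guest acks it.
 */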
static irqreturn_t kvm_assigned_dev_intr(int irq, void *dev_id)
{
        unsigned long flags;
        struct kvm_assigned_dev_kernel *assigned_dev =
                (struct kvm_assigned_dev_kernel *) dev_id;

        spin_lock_irqsave(&assigned_dev->assigned_dev_lock, flags);
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                int index = find_index_from_host_irq(assigned_dev, irq);
                if (index < 0)
                        goto out;
                assigned_dev->guest_msix_entries[index].flags |=
                        KVM_ASSIGNED_MSIX_PENDING;
        }

        schedule_work(&assigned_dev->interrupt_work);

        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_GUEST_INTX) {
                disable_irq_nosync(irq);
                assigned_dev->host_irq_disabled = true;
        }

out:
        spin_unlock_irqrestore(&assigned_dev->assigned_dev_lock, flags);
        return IRQ_HANDLED;
}

/* Ack the irq line for an assigned device */
static void kvm_assigned_dev_ack_irq(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_assigned_dev_kernel *dev;
        unsigned long flags;

        if (kian->gsi == -1)
                return;

        dev = container_of(kian, struct kvm_assigned_dev_kernel,
                           ack_notifier);

        kvm_set_irq(dev->kvm, dev->irq_source_id, dev->guest_irq, 0);

        /* The guest irq may be shared, so this ack may come
         * from another device.
         */
        spin_lock_irqsave(&dev->assigned_dev_lock, flags);
        if (dev->host_irq_disabled) {
                enable_irq(dev->host_irq);
                dev->host_irq_disabled = false;
        }
        spin_unlock_irqrestore(&dev->assigned_dev_lock, flags);
}

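/*
 * Tear down the guest side of an irq assignment: unregister the ack
 * notifier, release the irq source id and clear the guest irq type bits.
 */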
static void deassign_guest_irq(struct kvm *kvm,
                               struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_unregister_irq_ack_notifier(kvm, &assigned_dev->ack_notifier);
        assigned_dev->ack_notifier.gsi = -1;

        if (assigned_dev->irq_source_id != -1)
                kvm_free_irq_source_id(kvm, assigned_dev->irq_source_id);
        assigned_dev->irq_source_id = -1;
        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_GUEST_MASK);
}

/* This function implicitly holds the kvm->lock mutex due to cancel_work_sync() */
static void deassign_host_irq(struct kvm *kvm,
                              struct kvm_assigned_dev_kernel *assigned_dev)
{
        /*
         * In kvm_free_device_irq, cancel_work_sync() returns true if:
         * 1. the work was scheduled and then cancelled, or
         * 2. the work callback was executed.
         *
         * The first case guarantees that the irq is disabled and no more
         * events will arrive. In the second case, however, the irq may have
         * been re-enabled (e.g. for MSI), so we disable it here to prevent
         * further events.
         *
         * Note that this may result in a nested disable if the interrupt
         * type is INTx, but that is fine since we are about to free it.
         *
         * If this function is called as part of VM destruction, make sure
         * the kvm state is still valid at this point, since we may also
         * have to wait for interrupt_work to finish.
         */
        if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSIX) {
                int i;
                for (i = 0; i < assigned_dev->entries_nr; i++)
                        disable_irq_nosync(assigned_dev->
                                           host_msix_entries[i].vector);

                cancel_work_sync(&assigned_dev->interrupt_work);

                for (i = 0; i < assigned_dev->entries_nr; i++)
                        free_irq(assigned_dev->host_msix_entries[i].vector,
                                 (void *)assigned_dev);

                assigned_dev->entries_nr = 0;
                kfree(assigned_dev->host_msix_entries);
                kfree(assigned_dev->guest_msix_entries);
                pci_disable_msix(assigned_dev->dev);
        } else {
                /* Deal with MSI and INTx */
                disable_irq_nosync(assigned_dev->host_irq);
                cancel_work_sync(&assigned_dev->interrupt_work);

                free_irq(assigned_dev->host_irq, (void *)assigned_dev);

                if (assigned_dev->irq_requested_type & KVM_DEV_IRQ_HOST_MSI)
                        pci_disable_msi(assigned_dev->dev);
        }

        assigned_dev->irq_requested_type &= ~(KVM_DEV_IRQ_HOST_MASK);
}

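/*
 * Deassign the host and/or guest side of an irq assignment, as selected
 * by the mask bits in irq_requested_type. Returns -EINVAL without an
 * in-kernel irqchip, and -ENXIO if no irq is currently assigned.
 */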
static int kvm_deassign_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *assigned_dev,
                            unsigned long irq_requested_type)
{
        unsigned long guest_irq_type, host_irq_type;

        if (!irqchip_in_kernel(kvm))
                return -EINVAL;
        /* no irq assignment to deassign */
        if (!assigned_dev->irq_requested_type)
                return -ENXIO;

        host_irq_type = irq_requested_type & KVM_DEV_IRQ_HOST_MASK;
        guest_irq_type = irq_requested_type & KVM_DEV_IRQ_GUEST_MASK;

        if (host_irq_type)
                deassign_host_irq(kvm, assigned_dev);
        if (guest_irq_type)
                deassign_guest_irq(kvm, assigned_dev);

        return 0;
}

static void kvm_free_assigned_irq(struct kvm *kvm,
                                  struct kvm_assigned_dev_kernel *assigned_dev)
{
        kvm_deassign_irq(kvm, assigned_dev, assigned_dev->irq_requested_type);
}

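/*
 * Fully release an assigned device: free its irqs, reset and release
 * the PCI function, and drop it from the VM's assigned-device list.
 */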
static void kvm_free_assigned_device(struct kvm *kvm,
                                     struct kvm_assigned_dev_kernel
                                     *assigned_dev)
{
        kvm_free_assigned_irq(kvm, assigned_dev);

        pci_reset_function(assigned_dev->dev);

        pci_release_regions(assigned_dev->dev);
        pci_disable_device(assigned_dev->dev);
        pci_dev_put(assigned_dev->dev);

        list_del(&assigned_dev->list);
        kfree(assigned_dev);
}

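/* Release every device still assigned to this VM (used on VM teardown). */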
void kvm_free_all_assigned_devices(struct kvm *kvm)
{
        struct list_head *ptr, *ptr2;
        struct kvm_assigned_dev_kernel *assigned_dev;

        list_for_each_safe(ptr, ptr2, &kvm->arch.assigned_dev_head) {
                assigned_dev = list_entry(ptr,
                                          struct kvm_assigned_dev_kernel,
                                          list);

                kvm_free_assigned_device(kvm, assigned_dev);
        }
}

static int assigned_device_enable_host_intx(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        dev->host_irq = dev->dev->irq;
        /* Even though this is PCI, we don't want to use shared
         * interrupts. Sharing host devices with guest-assigned devices
         * on the same interrupt line is not a happy situation: there
         * are going to be long delays in accepting, acking, etc.
         */
        if (request_irq(dev->host_irq, kvm_assigned_dev_intr,
                        0, "kvm_assigned_intx_device", (void *)dev))
                return -EIO;
        return 0;
}

#ifdef __KVM_HAVE_MSI
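/*
 * Enable MSI on the host device (if not already enabled) and request
 * the resulting irq for the kvm interrupt handler.
 */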
static int assigned_device_enable_host_msi(struct kvm *kvm,
                                           struct kvm_assigned_dev_kernel *dev)
{
        int r;

        if (!dev->dev->msi_enabled) {
                r = pci_enable_msi(dev->dev);
                if (r)
                        return r;
        }

        dev->host_irq = dev->dev->irq;
        if (request_irq(dev->host_irq, kvm_assigned_dev_intr, 0,
                        "kvm_assigned_msi_device", (void *)dev)) {
                pci_disable_msi(dev->dev);
                return -EIO;
        }

        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
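/*
 * Enable MSI-X with the previously configured entry table and request
 * each vector; on failure, roll back any vectors already requested.
 */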
static int assigned_device_enable_host_msix(struct kvm *kvm,
                                            struct kvm_assigned_dev_kernel *dev)
{
        int i, r = -EINVAL;

        /* host_msix_entries and guest_msix_entries should have been
         * initialized */
        if (dev->entries_nr == 0)
                return r;

        r = pci_enable_msix(dev->dev, dev->host_msix_entries, dev->entries_nr);
        if (r)
                return r;

        for (i = 0; i < dev->entries_nr; i++) {
                r = request_irq(dev->host_msix_entries[i].vector,
                                kvm_assigned_dev_intr, 0,
                                "kvm_assigned_msix_device",
                                (void *)dev);
                if (r)
                        goto err;
        }

        return 0;
err:
        for (i -= 1; i >= 0; i--)
                free_irq(dev->host_msix_entries[i].vector, (void *)dev);
        pci_disable_msix(dev->dev);
        return r;
}

#endif

static int assigned_device_enable_guest_intx(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = irq->guest_irq;
        return 0;
}

#ifdef __KVM_HAVE_MSI
static int assigned_device_enable_guest_msi(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

#ifdef __KVM_HAVE_MSIX
static int assigned_device_enable_guest_msix(struct kvm *kvm,
                                struct kvm_assigned_dev_kernel *dev,
                                struct kvm_assigned_irq *irq)
{
        dev->guest_irq = irq->guest_irq;
        dev->ack_notifier.gsi = -1;
        dev->host_irq_disabled = false;
        return 0;
}
#endif

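/*
 * Request the host-side irq for the device, dispatching on the single
 * host irq type (INTx, MSI or MSI-X) selected by the caller. Fails
 * with -EEXIST if a host irq type is already configured.
 */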
static int assign_host_irq(struct kvm *kvm,
                           struct kvm_assigned_dev_kernel *dev,
                           __u32 host_irq_type)
{
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_HOST_MASK)
                return r;

        switch (host_irq_type) {
        case KVM_DEV_IRQ_HOST_INTX:
                r = assigned_device_enable_host_intx(kvm, dev);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_HOST_MSI:
                r = assigned_device_enable_host_msi(kvm, dev);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_HOST_MSIX:
                r = assigned_device_enable_host_msix(kvm, dev);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r)
                dev->irq_requested_type |= host_irq_type;

        return r;
}

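/*
 * Configure the guest-side irq routing for the device: allocate an irq
 * source id, record the guest irq, and register the ack notifier on
 * success.
 */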
static int assign_guest_irq(struct kvm *kvm,
                            struct kvm_assigned_dev_kernel *dev,
                            struct kvm_assigned_irq *irq,
                            unsigned long guest_irq_type)
{
        int id;
        int r = -EEXIST;

        if (dev->irq_requested_type & KVM_DEV_IRQ_GUEST_MASK)
                return r;

        id = kvm_request_irq_source_id(kvm);
        if (id < 0)
                return id;

        dev->irq_source_id = id;

        switch (guest_irq_type) {
        case KVM_DEV_IRQ_GUEST_INTX:
                r = assigned_device_enable_guest_intx(kvm, dev, irq);
                break;
#ifdef __KVM_HAVE_MSI
        case KVM_DEV_IRQ_GUEST_MSI:
                r = assigned_device_enable_guest_msi(kvm, dev, irq);
                break;
#endif
#ifdef __KVM_HAVE_MSIX
        case KVM_DEV_IRQ_GUEST_MSIX:
                r = assigned_device_enable_guest_msix(kvm, dev, irq);
                break;
#endif
        default:
                r = -EINVAL;
        }

        if (!r) {
                dev->irq_requested_type |= guest_irq_type;
                kvm_register_irq_ack_notifier(kvm, &dev->ack_notifier);
        } else
                kvm_free_irq_source_id(kvm, dev->irq_source_id);

        return r;
}

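/*
 * KVM_ASSIGN_DEV_IRQ handler: validates the requested host/guest irq
 * type combination (at most one of each, at least one overall) and
 * wires up both sides.
 */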
/* TODO Deal with KVM_DEV_IRQ_ASSIGNED_MASK_MSIX */
static int kvm_vm_ioctl_assign_irq(struct kvm *kvm,
                                   struct kvm_assigned_irq *assigned_irq)
{
        int r = -EINVAL;
        struct kvm_assigned_dev_kernel *match;
        unsigned long host_irq_type, guest_irq_type;

        if (!capable(CAP_SYS_RAWIO))
                return -EPERM;

        if (!irqchip_in_kernel(kvm))
                return r;

        mutex_lock(&kvm->lock);
        r = -ENODEV;
        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        host_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_HOST_MASK);
        guest_irq_type = (assigned_irq->flags & KVM_DEV_IRQ_GUEST_MASK);

        r = -EINVAL;
        /* can only assign one type at a time */
        if (hweight_long(host_irq_type) > 1)
                goto out;
        if (hweight_long(guest_irq_type) > 1)
                goto out;
        if (host_irq_type == 0 && guest_irq_type == 0)
                goto out;

        r = 0;
        if (host_irq_type)
                r = assign_host_irq(kvm, match, host_irq_type);
        if (r)
                goto out;

        if (guest_irq_type)
                r = assign_guest_irq(kvm, match, assigned_irq, guest_irq_type);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

static int kvm_vm_ioctl_deassign_dev_irq(struct kvm *kvm,
                                         struct kvm_assigned_irq
                                         *assigned_irq)
{
        int r = -ENODEV;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_irq->assigned_dev_id);
        if (!match)
                goto out;

        r = kvm_deassign_irq(kvm, match, assigned_irq->flags);
out:
        mutex_unlock(&kvm->lock);
        return r;
}

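/*
 * KVM_ASSIGN_PCI_DEVICE handler: looks up the host PCI device, claims
 * and resets it, adds it to the VM's assigned-device list and, if
 * requested, maps it through the IOMMU.
 */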
static int kvm_vm_ioctl_assign_device(struct kvm *kvm,
                                      struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0, idx;
        struct kvm_assigned_dev_kernel *match;
        struct pci_dev *dev;

        mutex_lock(&kvm->lock);
        idx = srcu_read_lock(&kvm->srcu);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (match) {
                /* device already assigned */
                r = -EEXIST;
                goto out;
        }

        match = kzalloc(sizeof(struct kvm_assigned_dev_kernel), GFP_KERNEL);
        if (match == NULL) {
                printk(KERN_INFO "%s: Couldn't allocate memory\n",
                       __func__);
                r = -ENOMEM;
                goto out;
        }
        dev = pci_get_domain_bus_and_slot(assigned_dev->segnr,
                                          assigned_dev->busnr,
                                          assigned_dev->devfn);
        if (!dev) {
                printk(KERN_INFO "%s: host device not found\n", __func__);
                r = -EINVAL;
                goto out_free;
        }
        if (pci_enable_device(dev)) {
                printk(KERN_INFO "%s: Could not enable PCI device\n", __func__);
                r = -EBUSY;
                goto out_put;
        }
        r = pci_request_regions(dev, "kvm_assigned_device");
        if (r) {
                printk(KERN_INFO "%s: Could not get access to device regions\n",
                       __func__);
                goto out_disable;
        }

        pci_reset_function(dev);

        match->assigned_dev_id = assigned_dev->assigned_dev_id;
        match->host_segnr = assigned_dev->segnr;
        match->host_busnr = assigned_dev->busnr;
        match->host_devfn = assigned_dev->devfn;
        match->flags = assigned_dev->flags;
        match->dev = dev;
        spin_lock_init(&match->assigned_dev_lock);
        match->irq_source_id = -1;
        match->kvm = kvm;
        match->ack_notifier.irq_acked = kvm_assigned_dev_ack_irq;
        INIT_WORK(&match->interrupt_work,
                  kvm_assigned_dev_interrupt_work_handler);

        list_add(&match->list, &kvm->arch.assigned_dev_head);

        if (assigned_dev->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU) {
                if (!kvm->arch.iommu_domain) {
                        r = kvm_iommu_map_guest(kvm);
                        if (r)
                                goto out_list_del;
                }
                r = kvm_assign_device(kvm, match);
                if (r)
                        goto out_list_del;
        }

out:
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
out_list_del:
        list_del(&match->list);
        pci_release_regions(dev);
out_disable:
        pci_disable_device(dev);
out_put:
        pci_dev_put(dev);
out_free:
        kfree(match);
        srcu_read_unlock(&kvm->srcu, idx);
        mutex_unlock(&kvm->lock);
        return r;
}

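/*
 * KVM_DEASSIGN_PCI_DEVICE handler: detaches the device from the IOMMU
 * domain if necessary and releases it back to the host.
 */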
static int kvm_vm_ioctl_deassign_device(struct kvm *kvm,
                                        struct kvm_assigned_pci_dev *assigned_dev)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *match;

        mutex_lock(&kvm->lock);

        match = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                      assigned_dev->assigned_dev_id);
        if (!match) {
                printk(KERN_INFO "%s: device hasn't been assigned before, "
                       "so cannot be deassigned\n", __func__);
                r = -EINVAL;
                goto out;
        }

        if (match->flags & KVM_DEV_ASSIGN_ENABLE_IOMMU)
                kvm_deassign_device(kvm, match);

        kvm_free_assigned_device(kvm, match);

out:
        mutex_unlock(&kvm->lock);
        return r;
}


#ifdef __KVM_HAVE_MSIX
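/*
 * KVM_ASSIGN_SET_MSIX_NR handler: records how many MSI-X entries the
 * device will use and allocates the host and guest entry tables. The
 * count must be non-zero, below KVM_MAX_MSIX_PER_DEV, and may only be
 * set once per device.
 */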
static int kvm_vm_ioctl_set_msix_nr(struct kvm *kvm,
                                    struct kvm_assigned_msix_nr *entry_nr)
{
        int r = 0;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                     entry_nr->assigned_dev_id);
        if (!adev) {
                r = -EINVAL;
                goto msix_nr_out;
        }

        if (adev->entries_nr == 0) {
                adev->entries_nr = entry_nr->entry_nr;
                if (adev->entries_nr == 0 ||
                    adev->entries_nr >= KVM_MAX_MSIX_PER_DEV) {
                        r = -EINVAL;
                        goto msix_nr_out;
                }

                adev->host_msix_entries = kzalloc(sizeof(struct msix_entry) *
                                                  entry_nr->entry_nr,
                                                  GFP_KERNEL);
                if (!adev->host_msix_entries) {
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
                adev->guest_msix_entries = kzalloc(
                                sizeof(struct kvm_guest_msix_entry) *
                                entry_nr->entry_nr, GFP_KERNEL);
                if (!adev->guest_msix_entries) {
                        kfree(adev->host_msix_entries);
                        r = -ENOMEM;
                        goto msix_nr_out;
                }
        } else /* Setting the MSI-X entry count twice is not allowed */
                r = -EINVAL;
msix_nr_out:
        mutex_unlock(&kvm->lock);
        return r;
}

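/*
 * KVM_ASSIGN_SET_MSIX_ENTRY handler: stores the guest GSI for one MSI-X
 * table entry, reusing a free slot or updating an existing one.
 */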
static int kvm_vm_ioctl_set_msix_entry(struct kvm *kvm,
                                       struct kvm_assigned_msix_entry *entry)
{
        int r = 0, i;
        struct kvm_assigned_dev_kernel *adev;

        mutex_lock(&kvm->lock);

        adev = kvm_find_assigned_dev(&kvm->arch.assigned_dev_head,
                                     entry->assigned_dev_id);

        if (!adev) {
                r = -EINVAL;
                goto msix_entry_out;
        }

        for (i = 0; i < adev->entries_nr; i++)
                if (adev->guest_msix_entries[i].vector == 0 ||
                    adev->guest_msix_entries[i].entry == entry->entry) {
                        adev->guest_msix_entries[i].entry = entry->entry;
                        adev->guest_msix_entries[i].vector = entry->gsi;
                        adev->host_msix_entries[i].entry = entry->entry;
                        break;
                }
        if (i == adev->entries_nr) {
                r = -ENOSPC;
                goto msix_entry_out;
        }

msix_entry_out:
        mutex_unlock(&kvm->lock);

        return r;
}
#endif

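/*
 * Top-level dispatcher for the device assignment ioctls; copies each
 * argument structure in from user space and calls the matching handler.
 * Returns -ENOTTY for ioctls it does not handle.
 */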
long kvm_vm_ioctl_assigned_device(struct kvm *kvm, unsigned ioctl,
                                  unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        int r = -ENOTTY;

        switch (ioctl) {
        case KVM_ASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_assign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_IRQ: {
                r = -EOPNOTSUPP;
                break;
        }
#ifdef KVM_CAP_ASSIGN_DEV_IRQ
        case KVM_ASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_assign_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
        case KVM_DEASSIGN_DEV_IRQ: {
                struct kvm_assigned_irq assigned_irq;

                r = -EFAULT;
                if (copy_from_user(&assigned_irq, argp, sizeof assigned_irq))
                        goto out;
                r = kvm_vm_ioctl_deassign_dev_irq(kvm, &assigned_irq);
                if (r)
                        goto out;
                break;
        }
#endif
#ifdef KVM_CAP_DEVICE_DEASSIGNMENT
        case KVM_DEASSIGN_PCI_DEVICE: {
                struct kvm_assigned_pci_dev assigned_dev;

                r = -EFAULT;
                if (copy_from_user(&assigned_dev, argp, sizeof assigned_dev))
                        goto out;
                r = kvm_vm_ioctl_deassign_device(kvm, &assigned_dev);
                if (r)
                        goto out;
                break;
        }
#endif
#ifdef KVM_CAP_IRQ_ROUTING
        case KVM_SET_GSI_ROUTING: {
                struct kvm_irq_routing routing;
                struct kvm_irq_routing __user *urouting;
                struct kvm_irq_routing_entry *entries;

                r = -EFAULT;
                if (copy_from_user(&routing, argp, sizeof(routing)))
                        goto out;
                r = -EINVAL;
                if (routing.nr >= KVM_MAX_IRQ_ROUTES)
                        goto out;
                if (routing.flags)
                        goto out;
                r = -ENOMEM;
                entries = vmalloc(routing.nr * sizeof(*entries));
                if (!entries)
                        goto out;
                r = -EFAULT;
                urouting = argp;
                if (copy_from_user(entries, urouting->entries,
                                   routing.nr * sizeof(*entries)))
                        goto out_free_irq_routing;
                r = kvm_set_irq_routing(kvm, entries, routing.nr,
                                        routing.flags);
        out_free_irq_routing:
                vfree(entries);
                break;
        }
#endif /* KVM_CAP_IRQ_ROUTING */
#ifdef __KVM_HAVE_MSIX
        case KVM_ASSIGN_SET_MSIX_NR: {
                struct kvm_assigned_msix_nr entry_nr;
                r = -EFAULT;
                if (copy_from_user(&entry_nr, argp, sizeof entry_nr))
                        goto out;
                r = kvm_vm_ioctl_set_msix_nr(kvm, &entry_nr);
                if (r)
                        goto out;
                break;
        }
        case KVM_ASSIGN_SET_MSIX_ENTRY: {
                struct kvm_assigned_msix_entry entry;
                r = -EFAULT;
                if (copy_from_user(&entry, argp, sizeof entry))
                        goto out;
                r = kvm_vm_ioctl_set_msix_entry(kvm, &entry);
                if (r)
                        goto out;
                break;
        }
#endif
        }
out:
        return r;
}