/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "iodev.h"

/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */
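
/*
 * Illustrative userspace sketch (not part of this file): a VMM might wire
 * an eventfd to a guest interrupt line roughly as follows.  The vmfd, efd
 * and gsi values are hypothetical; KVM_IRQFD and struct kvm_irqfd come
 * from <linux/kvm.h>.
 *
 *	struct kvm_irqfd data = { .fd = efd, .gsi = 5 };
 *	uint64_t one = 1;
 *
 *	ioctl(vmfd, KVM_IRQFD, &data);		// assign
 *	write(efd, &one, sizeof(one));		// inject an edge on GSI 5
 *
 *	data.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vmfd, KVM_IRQFD, &data);		// tear it back down
 */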

struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	/* Raise then lower the GSI, turning the eventfd signal into a pulse */
	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	/*
	 * It is now safe to release the object's resources.
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry *irq;
	struct kvm *kvm = irqfd->kvm;

	if (flags & POLLIN) {
		rcu_read_lock();
		irq = rcu_dereference(irqfd->irq_entry);
		/* An event has been signaled, inject an interrupt */
		if (irq)
			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
		else
			schedule_work(&irqfd->inject);
		rcu_read_unlock();
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will clean up for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
			 struct kvm_irq_routing_table *irq_rt)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct hlist_node *n;

	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
		rcu_assign_pointer(irqfd->irq_entry, NULL);
		return;
	}

	hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			rcu_assign_pointer(irqfd->irq_entry, e);
		else
			rcu_assign_pointer(irqfd->irq_entry, NULL);
	}
}

static int
kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
{
	struct kvm_irq_routing_table *irq_rt;
	struct _irqfd *irqfd, *tmp;
	struct file *file = NULL;
	struct eventfd_ctx *eventfd = NULL;
	int ret;
	unsigned int events;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

	file = eventfd_fget(fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	eventfd = eventfd_ctx_fileget(file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	irq_rt = rcu_dereference_protected(kvm->irq_routing,
					   lockdep_is_held(&kvm->irqfds.lock));
	irqfd_update(kvm, irqfd, irq_rt);

	events = file->f_op->poll(file, &irqfd->pt);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fput(file);

	return 0;

fail:
	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	if (!IS_ERR(file))
		fput(file);

	kfree(irqfd);
	return ret;
}

void
kvm_eventfd_init(struct kvm *kvm)
{
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

/*
 * shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi) {
			/*
			 * This rcu_assign_pointer is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below.  It is paired with the
			 * synchronize_rcu done by the caller of that function.
			 */
			rcu_assign_pointer(irqfd->irq_entry, NULL);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	if (flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, fd, gsi);

	return kvm_irqfd_assign(kvm, fd, gsi);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Change irq_routing and irqfd.
 * Caller must invoke synchronize_rcu afterwards; see the sketch after
 * this function.
 */
void kvm_irq_routing_update(struct kvm *kvm,
			    struct kvm_irq_routing_table *irq_rt)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	rcu_assign_pointer(kvm->irq_routing, irq_rt);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd, irq_rt);

	spin_unlock_irq(&kvm->irqfds.lock);
}
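
/*
 * Hypothetical caller-side sketch (the actual routing-update path lives
 * elsewhere in KVM): the synchronize_rcu contract above is expected to be
 * honored roughly like
 *
 *	old = kvm->irq_routing;
 *	kvm_irq_routing_update(kvm, new);
 *	synchronize_rcu();
 *	kfree(old);
 *
 * so that irqfd_wakeup()'s rcu_read_lock() section can never dereference
 * a routing entry that has already been freed.
 */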

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
static int __init irqfd_module_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void __exit irqfd_module_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}

module_init(irqfd_module_init);
module_exit(irqfd_module_exit);

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
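
/*
 * Illustrative userspace sketch (not part of this file): registering a
 * 4-byte MMIO doorbell so that guest writes of a specific value signal an
 * eventfd instead of exiting to userspace.  The vmfd, efd and address are
 * hypothetical; KVM_IOEVENTFD and struct kvm_ioeventfd come from
 * <linux/kvm.h>.
 *
 *	struct kvm_ioeventfd data = {
 *		.addr      = 0xfe001000,	// guest-physical address
 *		.len       = 4,
 *		.fd        = efd,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *		.datamatch = 0x12,
 *	};
 *
 *	ioctl(vmfd, KVM_IOEVENTFD, &data);
 *
 * Omitting KVM_IOEVENTFD_FLAG_DATAMATCH registers a wildcard that fires
 * on any value written to the range.
 */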

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (!(addr == p->addr && len == p->length))
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->addr == p->addr && _p->length == p->length &&
		    (_p->wildcard || p->wildcard ||
		     _p->datamatch == p->datamatch))
			return true;

	return false;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
	enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
	struct _ioeventfd *p;
	struct eventfd_ctx *eventfd;
	int ret;

	/* must be natural-word sized */
	switch (args->len) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr = args->addr;
	p->length = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, &p->dev);
	if (ret < 0)
		goto unlock_fail;

	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	int pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
	enum kvm_bus bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}