/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

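/*
 * Runs from the irqfd's inject work item once the eventfd has been
 * signaled.  Without a resampler the GSI is pulsed (asserted, then
 * immediately de-asserted); with a resampler it is only asserted, and
 * stays that way until the guest acks the interrupt.
 */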
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
			    false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
			    false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

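/*
 * Detach an irqfd from its resampler.  The last irqfd to leave also
 * de-asserts the shared line, unregisters the ack notifier and frees
 * the resampler itself.
 */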
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources.
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal.
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

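/*
 * Architectures may override this to inject an interrupt directly from
 * the irqfd wakeup callback.  The default of -EWOULDBLOCK tells
 * irqfd_wakeup() to fall back to deferred injection via the inject
 * work item.
 */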
int __attribute__((weak)) kvm_arch_set_irq(
			struct kvm_kernel_irq_routing_entry *irq,
			struct kvm *kvm, int irq_source_id,
			int level,
			bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
				    false);
		else if (kvm_arch_set_irq(&irq, kvm,
					  KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					  false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

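/*
 * poll_table callback, invoked from f_op->poll(): hook irqfd->wait
 * into the eventfd's wait-queue so that irqfd_wakeup() runs whenever
 * the eventfd is signaled.
 */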
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

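/*
 * Set up a new irqfd: grab references on the eventfd (and optional
 * resampler eventfd), hook into the eventfd's wait-queue, snapshot the
 * current routing for the GSI, and inject any event that was already
 * pending so it is not lost.
 */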
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irqfd->consumer.token = (void *)irqfd->eventfd;
	irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
	irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
	irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
	irqfd->consumer.start = kvm_arch_irq_bypass_start;
	ret = irq_bypass_register_consumer(&irqfd->consumer);
	if (ret)
		pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
#endif

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

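/*
 * Report whether any ack notifier is registered for the GSI behind
 * this irqchip pin; irq_srcu is taken internally.
 */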
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

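/*
 * Run every ack notifier registered for @gsi.  Callers are expected to
 * hold kvm->irq_srcu, as kvm_notify_acked_irq() below does.
 */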
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

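/*
 * Per-VM initialization of the irqfd and ioeventfd bookkeeping, done
 * once while the VM is being created.
 */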
void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

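/*
 * Entry point for the KVM_IRQFD ioctl: validate the flags, then
 * dispatch to the assign or deassign path.  A minimal sketch of the
 * (hypothetical) userspace side, where efd is an eventfd created
 * earlier and 24 is an arbitrary example GSI:
 *
 *	struct kvm_irqfd irqfd = { .fd = efd, .gsi = 24 };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 */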
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated
 * single-thread queue to prevent deadlock against flushing the normal
 * work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * Userspace can register a PIO/MMIO address with an eventfd for
 * receiving notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

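/*
 * A minimal sketch of the (hypothetical) userspace side: after this
 * ioctl, a 4-byte guest write of 0x1 to MMIO address 0xd0000000 is
 * handled in the kernel and signals efd instead of exiting to
 * userspace.  The address and value are made-up examples:
 *
 *	struct kvm_ioeventfd ioe = {
 *		.addr      = 0xd0000000,
 *		.len       = 4,
 *		.fd        = efd,
 *		.datamatch = 0x1,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *
 *	ioctl(vm_fd, KVM_IOEVENTFD, &ioe);
 */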
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

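/*
 * Decide whether a guest write hits this ioeventfd: the address must
 * match exactly, the length must match unless the entry was registered
 * with length 0, and for non-wildcard entries the written value must
 * equal datamatch.
 */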
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

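/*
 * Register one ioeventfd on the given bus: take a reference on the
 * eventfd, reject colliding registrations, then add a kvm_io_device
 * that signals the eventfd on matching writes.
 */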
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/*
	 * When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

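/* Entry point for the KVM_IOEVENTFD ioctl: dispatch assign/deassign. */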
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}