/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

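/*
 * Inject an interrupt on behalf of a signaled eventfd.  For an ordinary
 * irqfd we pulse the GSI (assert, then immediately de-assert); a resampler
 * irqfd only asserts, and the line is de-asserted later from
 * irqfd_resampler_ack() once the guest acknowledges the interrupt.
 */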
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

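/*
 * Drop an irqfd from its resampler's notify list; if it was the last user,
 * tear the resampler down as well: unregister the ack notifier, leave the
 * shared line de-asserted, and free the structure.
 */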
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources.
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will clean up for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

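/*
 * Poll-table callback: add our custom wait-queue entry (armed with
 * irqfd_wakeup) to the eventfd's wait queue, so signals and POLLHUP are
 * delivered to us directly.
 */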
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

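/*
 * Refresh the cached copy of the GSI's routing entry under the irq_entry
 * seqcount so that irqfd_wakeup() can read it without taking locks.  A GSI
 * that does not resolve to exactly one routing entry is cached as invalid
 * (type == 0), which forces injection onto the slow work-queue path.
 */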
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

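/*
 * Wire up a new irqfd: resolve the eventfd, optionally join or create a
 * resampler for the requested GSI, install the custom poll callback, and
 * publish the irqfd on kvm->irqfds.items.  Returns 0 or a -errno.
 */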
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * Do not drop the file until the irqfd is fully initialized,
	 * otherwise we might race against the POLLHUP.
	 */
	fdput(f);
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

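/*
 * Invoke the ack notifiers registered for a GSI.  kvm_notify_acked_irq()
 * calls this under kvm->irq_srcu; callers invoking it directly are expected
 * to hold that SRCU read lock themselves.
 */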
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
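
/*
 * Illustrative sketch only (not part of this file's API): how userspace
 * typically drives KVM_IRQFD.  The fd values and GSI below are assumptions
 * made up for the example.
 *
 *	int efd = eventfd(0, 0);
 *	struct kvm_irqfd irqfd = {
 *		.fd  = efd,	// eventfd to watch
 *		.gsi = 24,	// hypothetical guest interrupt line
 *	};
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *	write(efd, &(uint64_t){1}, 8);	// from here on, this injects GSI 24
 */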

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * Userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

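/*
 * Illustrative sketch only: registering a 4-byte MMIO doorbell with
 * KVM_IOEVENTFD from userspace.  The address and fd below are assumptions
 * made up for the example.
 *
 *	struct kvm_ioeventfd io = {
 *		.addr = 0xd0000000,	// hypothetical doorbell address
 *		.len  = 4,
 *		.fd   = efd,		// eventfd a host thread polls
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &io);
 *	// a guest 4-byte write to 0xd0000000 now signals efd without a
 *	// userspace exit
 */
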
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

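/*
 * Decide whether a guest write hits this ioeventfd: the address must match
 * exactly, a zero-length registration matches any access at that address,
 * and otherwise the length (and the datamatch value, unless wildcard) must
 * agree as well.
 */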
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

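/*
 * Two registrations collide when they live on the same bus at the same
 * address and their length/datamatch constraints could both match a single
 * access (either one is zero-length, or the lengths are equal and either
 * side is a wildcard or the datamatch values coincide).
 */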
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	struct kvm_io_bus *bus;
	int ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		bus = kvm_get_bus(kvm, bus_idx);
		if (bus)
			bus->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/*
	 * When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}
959}