/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;

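/*
 * Inject the interrupt for a triggered irqfd.  Without a resampler we
 * treat the GSI as edge-triggered and pulse it (assert, then de-assert);
 * with a resampler we only assert, and irqfd_resampler_ack() de-asserts
 * once the guest acknowledges the interrupt.
 */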
static void
irqfd_inject(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct kvm_kernel_irqfd_resampler *resampler;
	struct kvm *kvm;
	struct kvm_kernel_irqfd *irqfd;
	int idx;

	resampler = container_of(kian,
			struct kvm_kernel_irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

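/*
 * Drop an irqfd from its resampler's notify list.  The last irqfd to
 * leave de-asserts the shared GSI, unregisters the ack notifier and
 * frees the resampler itself.
 */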
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(work, struct kvm_kernel_irqfd, shutdown);
	struct kvm *kvm = irqfd->kvm;
	u64 cnt;

	/* Make sure the irqfd has been initialized in the assign path. */
	synchronize_srcu(&kvm->irq_srcu);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources.
	 */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

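/*
 * Default (weak) implementation for architectures that cannot inject
 * from atomic context: returning -EWOULDBLOCK makes irqfd_wakeup()
 * defer the injection to the workqueue.
 */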
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
				struct kvm_kernel_irq_routing_entry *irq,
				struct kvm *kvm, int irq_source_id,
				int level,
				bool line_status)
{
	return -EWOULDBLOCK;
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(wait, struct kvm_kernel_irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (kvm_arch_set_irq_inatomic(&irq, kvm,
					      KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					      false) == -EWOULDBLOCK)
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will clean up for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

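/*
 * poll_table callback: hooks our wait-queue entry (and thus
 * irqfd_wakeup) into the eventfd's wait queue.
 */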
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(pt, struct kvm_kernel_irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

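/*
 * Cache the GSI's current routing entry in the irqfd.  The seqcount
 * lets irqfd_wakeup() read a consistent snapshot from atomic context;
 * anything but a single-entry route is recorded as type 0, which makes
 * the wakeup path fall back to the workqueue.
 */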
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	e = entries;
	if (n_entries == 1)
		irqfd->irq_entry = *e;
	else
		irqfd->irq_entry.type = 0;

	write_seqcount_end(&irqfd->irq_entry_sc);
}

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
				struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
				struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
				struct kvm *kvm, unsigned int host_irq,
				uint32_t guest_irq, bool set)
{
	return 0;
}
#endif

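/*
 * Set up a new irqfd: grab the eventfd, optionally join (or create) a
 * resampler for the GSI, register our wakeup callback with the
 * eventfd's wait queue and publish the irqfd on kvm->irqfds.items.
 */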
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct kvm_kernel_irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
	if (kvm_arch_has_irq_bypass()) {
		irqfd->consumer.token = (void *)irqfd->eventfd;
		irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
		irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
		irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
		irqfd->consumer.start = kvm_arch_irq_bypass_start;
		ret = irq_bypass_register_consumer(&irqfd->consumer);
		if (ret)
			pr_info("irq bypass consumer (token %p) registration fails: %d\n",
				irqfd->consumer.token, ret);
	}
#endif

	srcu_read_unlock(&kvm->irq_srcu, idx);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);
	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

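/*
 * Returns true if some ack notifier is registered for the GSI behind
 * the given irqchip pin, i.e. an in-kernel consumer cares about EOIs
 * for that pin.
 */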
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
	struct kvm_irq_ack_notifier *kian;

	hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
				 link)
		if (kian->gsi == gsi)
			kian->irq_acked(kian);
}

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		kvm_notify_acked_gsi(kvm, gsi);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

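/*
 * A minimal userspace sketch of driving this ioctl (illustrative only;
 * vm_fd and the GSI number are assumptions, not taken from this file):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd irqfd = { .fd = efd, .gsi = 24 };
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// attach
 *	write(efd, &(uint64_t){1}, 8);		// inject GSI 24
 *	irqfd.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &irqfd);	// detach
 */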
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct kvm_kernel_irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
		irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
		if (irqfd->producer) {
			int ret = kvm_arch_update_irqfd_routing(
					irqfd->kvm, irqfd->producer->irq,
					irqfd->gsi, 1);
			WARN_ON(ret);
		}
#endif
	}

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

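/*
 * A minimal userspace sketch of registering an ioeventfd (illustrative
 * only; vm_fd and the guest MMIO address are assumptions, not taken
 * from this file):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd io = {
 *		.addr      = 0xd0000000,	// guest MMIO address
 *		.len       = 4,
 *		.fd        = efd,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *		.datamatch = 1,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &io);
 *
 * After this, a 4-byte guest write of 1 to that address signals efd
 * instead of exiting to userspace.
 */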
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

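/*
 * Two registrations collide when they sit on the same bus at the same
 * address and could both match one write: a zero-length entry overlaps
 * everything at that address, equal lengths overlap when either side
 * is a wildcard or the datamatch values are equal, and differing
 * non-zero lengths never overlap.
 */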
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				enum kvm_bus bus_idx,
				struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}


static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	int                       ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		if (kvm->buses[bus_idx])
			kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}