/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>

#include "iodev.h"

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */
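
/*
 * Typical userspace usage (an illustrative sketch, not part of this
 * file): create an eventfd and bind it to a guest gsi with the
 * KVM_IRQFD ioctl on the VM fd:
 *
 *	struct kvm_irqfd data = { .fd = efd, .gsi = gsi };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &data);
 *
 * Each subsequent signal of efd then injects an interrupt on gsi
 * without a heavyweight exit to userspace.
 */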

/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts.  The interrupt is asserted on eventfd
 * trigger.  On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd.  All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user.  We can also therefore share the same irq source ID.
 */
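
/*
 * Sketch of a resampling setup (illustrative only, field names as in
 * struct kvm_irqfd): a second eventfd is signalled on acknowledgement
 * via the irq ack notifier:
 *
 *	struct kvm_irqfd data = {
 *		.fd         = trigger_efd,
 *		.gsi        = gsi,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *		.resamplefd = resample_efd,
 *	};
 *
 * Signalling trigger_efd asserts the level; the guest's ack de-asserts
 * it and signals resample_efd so the device model can re-check its
 * state and possibly re-assert.
 */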
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in list of kvm->irqfd.resampler_list.  Use for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};

struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry irq_entry;
	seqcount_t irq_entry_sc;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;
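
/*
 * Work-queue handler: inject the interrupt for this irqfd.  The
 * edge-triggered (non-resampler) case pulses the line by asserting and
 * immediately de-asserting it; the resampler case only asserts, since
 * de-assertion happens in irqfd_resampler_ack().
 */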
static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
				false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
				false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct kvm *kvm;
	struct _irqfd *irqfd;
	int idx;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}
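
/*
 * Detach an irqfd from its resampler.  The last irqfd to leave the gsi
 * also unregisters the ack notifier, de-asserts the line and frees the
 * resampler itself.
 */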
static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}


/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
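		/*
		 * Snapshot the routing entry locklessly; retry if an
		 * irqfd_update() on the write side raced with us.
		 */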
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
					false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}
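
/*
 * Called through f_op->poll() at assign time; hooks our wait-queue
 * entry (whose wake function is irqfd_wakeup) onto the eventfd's wait
 * queue.
 */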
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}
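
/*
 * Bind an eventfd to a gsi: install the wait-queue callback, set up an
 * optional resampler, and cache the MSI routing entry for the fast path.
 */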
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
/*
 * shutdown any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released.  Shutdown all
 * irqfds that still remain open
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances.  We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
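
/*
 * Illustrative userspace sketch (not part of this file): register a
 * 4-byte MMIO doorbell that signals efd instead of exiting to
 * userspace:
 *
 *	struct kvm_ioeventfd data = {
 *		.addr = doorbell_gpa,
 *		.len  = 4,
 *		.fd   = efd,
 *	};
 *
 *	ioctl(vm_fd, KVM_IOEVENTFD, &data);
 *
 * With KVM_IOEVENTFD_FLAG_DATAMATCH set, only writes whose value
 * equals .datamatch trigger the eventfd.
 */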
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}
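
/*
 * Decide whether a guest write of (addr, len, val) hits this ioeventfd:
 * the address must match exactly; a zero-length registration ignores
 * both length and data; otherwise the length must match and, unless
 * this is a wildcard, the written value must equal datamatch.
 */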
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	struct _ioeventfd *p;
	struct eventfd_ctx *eventfd;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len &&
	    args->flags & (KVM_IOEVENTFD_FLAG_PIO |
			   KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups.
	 */
	if (!args->len && !(args->flags & KVM_IOEVENTFD_FLAG_PIO)) {
		ret = kvm_io_bus_register_dev(kvm, KVM_FAST_MMIO_BUS,
					      p->addr, 0, &p->dev);
		if (ret < 0)
			goto register_fail;
	}

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

register_fail:
	kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		if (!p->length) {
			kvm_io_bus_unregister_dev(kvm, KVM_FAST_MMIO_BUS,
						  &p->dev);
		}
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}