/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell. All Rights Reserved.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>

/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */
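
/*
 * Rough sketch of the intended userspace flow (illustration only; assumes
 * the KVM_IRQFD ioctl and struct kvm_irqfd from the uapi headers that
 * accompany this code, and a VM fd obtained via KVM_CREATE_VM):
 *
 *	int efd = eventfd(0, 0);
 *	struct kvm_irqfd args = { .fd = efd, .gsi = 10 };
 *
 *	ioctl(vm_fd, KVM_IRQFD, &args);          (assign GSI 10 to efd)
 *
 *	uint64_t val = 1;
 *	write(efd, &val, sizeof(val));           (guest receives an interrupt)
 *
 *	args.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &args);          (tear the binding down)
 */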

struct _irqfd {
	struct kvm *kvm;
	struct eventfd_ctx *eventfd;	/* signaling source */
	int gsi;			/* guest interrupt line to raise */
	struct list_head list;		/* entry on kvm->irqfds.items */
	poll_table pt;
	wait_queue_head_t *wqh;		/* the eventfd's wait-queue head */
	wait_queue_t wait;		/* our custom wake-up callback */
	struct work_struct inject;	/* deferred injection work */
	struct work_struct shutdown;	/* deferred teardown work */
};

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	/* Raise and then lower the GSI, registering one interrupt event */
	mutex_lock(&kvm->lock);
	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
	kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
	mutex_unlock(&kvm->lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	remove_wait_queue(irqfd->wqh, &irqfd->wait);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed.
	 */
	flush_work(&irqfd->inject);

	/*
	 * It is now safe to release the object's resources.
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}
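
/*
 * Teardown ordering, summarized from the code in this file (not a normative
 * spec): an irqfd is first deactivated under kvm->irqfds.lock, either by
 * deassign/release or by the POLLHUP path in irqfd_wakeup(). Deactivation
 * removes it from kvm->irqfds.items and queues irqfd_shutdown() on the
 * dedicated cleanup workqueue, which unhooks the wait-queue entry, flushes
 * any in-flight injection work, and only then frees the object.
 */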

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return !list_empty(&irqfd->list);
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;

	if (flags & POLLIN)
		/* An event has been signaled, inject an interrupt */
		schedule_work(&irqfd->inject);

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		struct kvm *kvm = irqfd->kvm;
		unsigned long iflags;

		spin_lock_irqsave(&kvm->irqfds.lock, iflags);

		/*
		 * We must check whether someone deactivated the irqfd before
		 * we could acquire irqfds.lock, since the item is deactivated
		 * on the KVM side before it is unhooked from the wait-queue.
		 * If it is already deactivated, we can simply return, knowing
		 * the other side will clean up for us. We cannot race against
		 * the irqfd going away, since the other side is required to
		 * acquire wqh->lock, which we hold.
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, iflags);
	}

	return 0;
}

/* Called through f_op->poll() to register with the eventfd's wait-queue */
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);

	irqfd->wqh = wqh;
	add_wait_queue(wqh, &irqfd->wait);
}

static int
kvm_irqfd_assign(struct kvm *kvm, int fd, int gsi)
{
	struct _irqfd *irqfd;
	struct file *file = NULL;
	struct eventfd_ctx *eventfd = NULL;
	int ret;
	unsigned int events;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

	file = eventfd_fget(fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	eventfd = eventfd_ctx_fileget(file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd.
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	events = file->f_op->poll(file, &irqfd->pt);

	spin_lock_irq(&kvm->irqfds.lock);
	list_add_tail(&irqfd->list, &kvm->irqfds.items);
	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check whether an event was already pending on the eventfd before
	 * we registered, and trigger it as though we had not missed it.
	 */
	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * Do not drop the file reference until the irqfd is fully
	 * initialized; otherwise we might race against the POLLHUP.
	 */
	fput(file);

	return 0;

fail:
	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	if (file && !IS_ERR(file))
		fput(file);

	kfree(irqfd);
	return ret;
}

/* Set up the per-VM irqfd state */
void
kvm_irqfd_init(struct kvm *kvm)
{
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
}

/*
 * Shut down any irqfds that match fd+gsi.
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, int fd, int gsi)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == gsi)
			irqfd_deactivate(irqfd);
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed,
	 * so that we guarantee there will be no more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}
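
/*
 * Entry point for the irqfd ioctl path: with no flags set this assigns a
 * new eventfd/gsi binding, while KVM_IRQFD_FLAG_DEASSIGN tears a matching
 * binding down. (The userspace-facing ioctl is assumed to be KVM_IRQFD,
 * per the uapi headers that accompany this code.)
 */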
int
kvm_irqfd(struct kvm *kvm, int fd, int gsi, int flags)
{
	if (flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, fd, gsi);

	return kvm_irqfd_assign(kvm, fd, gsi);
}

/*
 * This function is called as the kvm VM fd is being released. Shut down all
 * irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed,
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated,
 * single-threaded queue to prevent deadlock against flushing the normal
 * work-queue: irqfd_inject() runs on the system workqueue and
 * irqfd_shutdown() flush_work()s it, so the shutdown jobs must not share
 * that queue.
 */
static int __init irqfd_module_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void __exit irqfd_module_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}

module_init(irqfd_module_init);
module_exit(irqfd_module_exit);