/*
 * VFIO: IOMMU DMA mapping support for Type1 IOMMU
 *
 * Copyright (C) 2012 Red Hat, Inc.  All rights reserved.
 *     Author: Alex Williamson <alex.williamson@redhat.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * Derived from original vfio:
 * Copyright 2010 Cisco Systems, Inc.  All rights reserved.
 * Author: Tom Lyon, pugs@cisco.com
 *
 * We arbitrarily define a Type1 IOMMU as one matching the below code.
 * It could be called the x86 IOMMU as it's designed for AMD-Vi & Intel
 * VT-d, but that makes it harder to re-use as theoretically anyone
 * implementing a similar IOMMU could make use of this.  We expect the
 * IOMMU to support the IOMMU API and have few to no restrictions around
 * the IOVA range that can be mapped.  The Type1 IOMMU is currently
 * optimized for relatively static mappings of a userspace process with
 * userspace pages pinned into memory.  We also assume devices and IOMMU
 * domains are PCI based as the IOMMU API is still centered around a
 * device/bus interface rather than a group interface.
 */

#include <linux/compat.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/iommu.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/uaccess.h>
#include <linux/vfio.h>
#include <linux/workqueue.h>

#define DRIVER_VERSION  "0.2"
#define DRIVER_AUTHOR   "Alex Williamson <alex.williamson@redhat.com>"
#define DRIVER_DESC     "Type1 IOMMU driver for VFIO"

static bool allow_unsafe_interrupts;
module_param_named(allow_unsafe_interrupts,
		   allow_unsafe_interrupts, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(allow_unsafe_interrupts,
		 "Enable VFIO IOMMU support on platforms without interrupt remapping support.");

static bool disable_hugepages;
module_param_named(disable_hugepages,
		   disable_hugepages, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(disable_hugepages,
		 "Disable VFIO IOMMU support for IOMMU hugepages.");

struct vfio_iommu {
	struct list_head	domain_list;
	struct mutex		lock;
	struct rb_root		dma_list;
	bool			v2;
	bool			nesting;
};

struct vfio_domain {
	struct iommu_domain	*domain;
	struct list_head	next;
	struct list_head	group_list;
	int			prot;		/* IOMMU_CACHE */
	bool			fgsp;		/* Fine-grained super pages */
};

struct vfio_dma {
	struct rb_node		node;
	dma_addr_t		iova;		/* Device address */
	unsigned long		vaddr;		/* Process virtual addr */
	size_t			size;		/* Map size (bytes) */
	int			prot;		/* IOMMU_READ/WRITE */
};

struct vfio_group {
	struct iommu_group	*iommu_group;
	struct list_head	next;
};

/*
 * This code handles mapping and unmapping of user data buffers
 * into DMA'ble space using the IOMMU
 */
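/*
 * Look up a vfio_dma that overlaps the range [start, start + size) in the
 * iova-sorted rb-tree, or return NULL if no mapping intersects the range.
 */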
static struct vfio_dma *vfio_find_dma(struct vfio_iommu *iommu,
				      dma_addr_t start, size_t size)
{
	struct rb_node *node = iommu->dma_list.rb_node;

	while (node) {
		struct vfio_dma *dma = rb_entry(node, struct vfio_dma, node);

		if (start + size <= dma->iova)
			node = node->rb_left;
		else if (start >= dma->iova + dma->size)
			node = node->rb_right;
		else
			return dma;
	}

	return NULL;
}
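/* Insert a new vfio_dma into the rb-tree, keyed by iova; ranges never overlap. */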
static void vfio_link_dma(struct vfio_iommu *iommu, struct vfio_dma *new)
{
	struct rb_node **link = &iommu->dma_list.rb_node, *parent = NULL;
	struct vfio_dma *dma;

	while (*link) {
		parent = *link;
		dma = rb_entry(parent, struct vfio_dma, node);

		if (new->iova + new->size <= dma->iova)
			link = &(*link)->rb_left;
		else
			link = &(*link)->rb_right;
	}

	rb_link_node(&new->node, parent, link);
	rb_insert_color(&new->node, &iommu->dma_list);
}

static void vfio_unlink_dma(struct vfio_iommu *iommu, struct vfio_dma *old)
{
	rb_erase(&old->node, &iommu->dma_list);
}

struct vwork {
	struct mm_struct	*mm;
	long			npage;
	struct work_struct	work;
};

/* delayed decrement/increment for locked_vm */
static void vfio_lock_acct_bg(struct work_struct *work)
{
	struct vwork *vwork = container_of(work, struct vwork, work);
	struct mm_struct *mm;

	mm = vwork->mm;
	down_write(&mm->mmap_sem);
	mm->locked_vm += vwork->npage;
	up_write(&mm->mmap_sem);
	mmput(mm);
	kfree(vwork);
}
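/*
 * Account pinned pages against current->mm->locked_vm.  Update directly
 * if mmap_sem can be taken without contention, otherwise defer the
 * update to the workqueue handler above.
 */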
static void vfio_lock_acct(long npage)
{
	struct vwork *vwork;
	struct mm_struct *mm;

	if (!current->mm || !npage)
		return; /* process exited or nothing to do */

	if (down_write_trylock(&current->mm->mmap_sem)) {
		current->mm->locked_vm += npage;
		up_write(&current->mm->mmap_sem);
		return;
	}

	/*
	 * Couldn't get mmap_sem lock, so must setup to update
	 * mm->locked_vm later.  If locked_vm were atomic, we
	 * wouldn't need this silliness.
	 */
	vwork = kmalloc(sizeof(struct vwork), GFP_KERNEL);
	if (!vwork)
		return;
	mm = get_task_mm(current);
	if (!mm) {
		kfree(vwork);
		return;
	}
	INIT_WORK(&vwork->work, vfio_lock_acct_bg);
	vwork->mm = mm;
	vwork->npage = npage;
	schedule_work(&vwork->work);
}

/*
 * Some mappings aren't backed by a struct page, for example an mmap'd
 * MMIO range for our own or another device.  These use a different
 * pfn conversion and shouldn't be tracked as locked pages.
 */
static bool is_invalid_reserved_pfn(unsigned long pfn)
{
	if (pfn_valid(pfn)) {
		bool reserved;
		struct page *tail = pfn_to_page(pfn);
		struct page *head = compound_head(tail);
		reserved = !!(PageReserved(head));
		if (head != tail) {
			/*
			 * "head" is not a dangling pointer
			 * (compound_head takes care of that)
			 * but the hugepage may have been split
			 * from under us (and we may not hold a
			 * reference count on the head page so it can
			 * be reused before we run PageReferenced), so
			 * we have to check PageTail before returning
			 * what we just read.
			 */
			smp_rmb();
			if (PageTail(tail))
				return reserved;
		}
		return PageReserved(tail);
	}

	return true;
}
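/*
 * Drop the reference taken when a page was pinned, marking it dirty if
 * the device may have written to it.  Returns 1 if a page reference was
 * released, 0 for reserved/invalid pfns that were never refcounted.
 */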
static int put_pfn(unsigned long pfn, int prot)
{
	if (!is_invalid_reserved_pfn(pfn)) {
		struct page *page = pfn_to_page(pfn);
		if (prot & IOMMU_WRITE)
			SetPageDirty(page);
		put_page(page);
		return 1;
	}
	return 0;
}
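/*
 * Translate a user virtual address into a pfn, first via
 * get_user_pages_fast(), then by walking the vma for VM_PFNMAP ranges
 * (e.g. mmap'd MMIO) that have no struct page backing.
 */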
static int vaddr_get_pfn(unsigned long vaddr, int prot, unsigned long *pfn)
{
	struct page *page[1];
	struct vm_area_struct *vma;
	int ret = -EFAULT;

	if (get_user_pages_fast(vaddr, 1, !!(prot & IOMMU_WRITE), page) == 1) {
		*pfn = page_to_pfn(page[0]);
		return 0;
	}

	down_read(&current->mm->mmap_sem);

	vma = find_vma_intersection(current->mm, vaddr, vaddr + 1);

	if (vma && vma->vm_flags & VM_PFNMAP) {
		*pfn = ((vaddr - vma->vm_start) >> PAGE_SHIFT) + vma->vm_pgoff;
		if (is_invalid_reserved_pfn(*pfn))
			ret = 0;
	}

	up_read(&current->mm->mmap_sem);

	return ret;
}

/*
 * Attempt to pin pages.  We really don't want to track all the pfns and
 * the iommu can only map chunks of consecutive pfns anyway, so get the
 * first page and all consecutive pages with the same locking.
 */
static long vfio_pin_pages(unsigned long vaddr, long npage,
			   int prot, unsigned long *pfn_base)
{
	unsigned long limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
	bool lock_cap = capable(CAP_IPC_LOCK);
	long ret, i;
	bool rsvd;

	if (!current->mm)
		return -ENODEV;

	ret = vaddr_get_pfn(vaddr, prot, pfn_base);
	if (ret)
		return ret;

	rsvd = is_invalid_reserved_pfn(*pfn_base);

	if (!rsvd && !lock_cap && current->mm->locked_vm + 1 > limit) {
		put_pfn(*pfn_base, prot);
		pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n", __func__,
			limit << PAGE_SHIFT);
		return -ENOMEM;
	}

	if (unlikely(disable_hugepages)) {
		if (!rsvd)
			vfio_lock_acct(1);
		return 1;
	}

	/* Lock all the consecutive pages from pfn_base */
	for (i = 1, vaddr += PAGE_SIZE; i < npage; i++, vaddr += PAGE_SIZE) {
		unsigned long pfn = 0;

		ret = vaddr_get_pfn(vaddr, prot, &pfn);
		if (ret)
			break;

		if (pfn != *pfn_base + i ||
		    rsvd != is_invalid_reserved_pfn(pfn)) {
			put_pfn(pfn, prot);
			break;
		}

		if (!rsvd && !lock_cap &&
		    current->mm->locked_vm + i + 1 > limit) {
			put_pfn(pfn, prot);
			pr_warn("%s: RLIMIT_MEMLOCK (%ld) exceeded\n",
				__func__, limit << PAGE_SHIFT);
			break;
		}
	}

	if (!rsvd)
		vfio_lock_acct(i);

	return i;
}
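/*
 * Release npage consecutive pfns pinned above.  Only pfns that actually
 * held a page reference count against locked_vm, so the accounting
 * subtracts the number of references released, not npage.
 */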
static long vfio_unpin_pages(unsigned long pfn, long npage,
			     int prot, bool do_accounting)
{
	unsigned long unlocked = 0;
	long i;

	for (i = 0; i < npage; i++)
		unlocked += put_pfn(pfn++, prot);

	if (do_accounting)
		vfio_lock_acct(-unlocked);

	return unlocked;
}

static void vfio_unmap_unpin(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	dma_addr_t iova = dma->iova, end = dma->iova + dma->size;
	struct vfio_domain *domain, *d;
	long unlocked = 0;

	if (!dma->size)
		return;
	/*
	 * We use the IOMMU to track the physical addresses, otherwise we'd
	 * need a much more complicated tracking system.  Unfortunately that
	 * means we need to use one of the iommu domains to figure out the
	 * pfns to unpin.  The rest need to be unmapped in advance so we have
	 * no iommu translations remaining when the pages are unpinned.
	 */
	domain = d = list_first_entry(&iommu->domain_list,
				      struct vfio_domain, next);

	list_for_each_entry_continue(d, &iommu->domain_list, next)
		iommu_unmap(d->domain, dma->iova, dma->size);

	while (iova < end) {
		size_t unmapped, len;
		phys_addr_t phys, next;

		phys = iommu_iova_to_phys(domain->domain, iova);
		if (WARN_ON(!phys)) {
			iova += PAGE_SIZE;
			continue;
		}

		/*
		 * To optimize for fewer iommu_unmap() calls, each of which
		 * may require hardware cache flushing, try to find the
		 * largest contiguous physical memory chunk to unmap.
		 */
		for (len = PAGE_SIZE;
		     !domain->fgsp && iova + len < end; len += PAGE_SIZE) {
			next = iommu_iova_to_phys(domain->domain, iova + len);
			if (next != phys + len)
				break;
		}

		unmapped = iommu_unmap(domain->domain, iova, len);
		if (WARN_ON(!unmapped))
			break;

		unlocked += vfio_unpin_pages(phys >> PAGE_SHIFT,
					     unmapped >> PAGE_SHIFT,
					     dma->prot, false);
		iova += unmapped;
	}

	vfio_lock_acct(-unlocked);
}

static void vfio_remove_dma(struct vfio_iommu *iommu, struct vfio_dma *dma)
{
	vfio_unmap_unpin(iommu, dma);
	vfio_unlink_dma(iommu, dma);
	kfree(dma);
}
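/*
 * The supported page sizes are the intersection of what every attached
 * domain's IOMMU can map; seeding the bitmap with PAGE_MASK ensures
 * nothing smaller than the CPU page size is ever advertised.
 */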
static unsigned long vfio_pgsize_bitmap(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	unsigned long bitmap = PAGE_MASK;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next)
		bitmap &= domain->domain->ops->pgsize_bitmap;
	mutex_unlock(&iommu->lock);

	return bitmap;
}

static int vfio_dma_do_unmap(struct vfio_iommu *iommu,
			     struct vfio_iommu_type1_dma_unmap *unmap)
{
	uint64_t mask;
	struct vfio_dma *dma;
	size_t unmapped = 0;
	int ret = 0;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	if (unmap->iova & mask)
		return -EINVAL;
	if (!unmap->size || unmap->size & mask)
		return -EINVAL;

	WARN_ON(mask & PAGE_MASK);

	mutex_lock(&iommu->lock);

	/*
	 * vfio-iommu-type1 (v1) - User mappings were coalesced together to
	 * avoid tracking individual mappings.  This means that the granularity
	 * of the original mapping was lost and the user was allowed to attempt
	 * to unmap any range.  Depending on the contiguousness of physical
	 * memory and page sizes supported by the IOMMU, arbitrary unmaps may
	 * or may not have worked.  We only guaranteed unmap granularity
	 * matching the original mapping; even though it was untracked here,
	 * the original mappings are reflected in IOMMU mappings.  This
	 * resulted in a couple unusual behaviors.  First, if a range is not
	 * able to be unmapped, ex. a set of 4k pages that was mapped as a
	 * 2M hugepage into the IOMMU, the unmap ioctl returns success but with
	 * a zero sized unmap.  Also, if an unmap request overlaps the first
	 * address of a hugepage, the IOMMU will unmap the entire hugepage.
	 * This also returns success and the returned unmap size reflects the
	 * actual size unmapped.
	 *
	 * We attempt to maintain compatibility with this "v1" interface, but
	 * we take control out of the hands of the IOMMU.  Therefore, an unmap
	 * request offset from the beginning of the original mapping will
	 * return success with zero sized unmap.  And an unmap request covering
	 * the first iova of the mapping will unmap the entire range.
	 *
	 * The v2 version of this interface intends to be more deterministic.
	 * Unmap requests must fully cover previous mappings.  Multiple
	 * mappings may still be unmapped by specifying large ranges, but there
	 * must not be any previous mappings bisected by the range.  An error
	 * will be returned if these conditions are not met.  The v2 interface
	 * will only return success and a size of zero if there were no
	 * mappings within the range.
	 */
	if (iommu->v2) {
		dma = vfio_find_dma(iommu, unmap->iova, 0);
		if (dma && dma->iova != unmap->iova) {
			ret = -EINVAL;
			goto unlock;
		}
		dma = vfio_find_dma(iommu, unmap->iova + unmap->size - 1, 0);
		if (dma && dma->iova + dma->size != unmap->iova + unmap->size) {
			ret = -EINVAL;
			goto unlock;
		}
	}

	while ((dma = vfio_find_dma(iommu, unmap->iova, unmap->size))) {
		if (!iommu->v2 && unmap->iova > dma->iova)
			break;
		unmapped += dma->size;
		vfio_remove_dma(iommu, dma);
	}

unlock:
	mutex_unlock(&iommu->lock);

	/* Report how much was unmapped */
	unmap->size = unmapped;

	return ret;
}

/*
 * Turns out AMD IOMMU has a page table bug where it won't map large pages
 * to a region that previously mapped smaller pages.  This should be fixed
 * soon, so this is just a temporary workaround to break mappings down into
 * PAGE_SIZE.  Better to map smaller pages than nothing.
 */
static int map_try_harder(struct vfio_domain *domain, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	long i;
	int ret;

	for (i = 0; i < npage; i++, pfn++, iova += PAGE_SIZE) {
		ret = iommu_map(domain->domain, iova,
				(phys_addr_t)pfn << PAGE_SHIFT,
				PAGE_SIZE, prot | domain->prot);
		if (ret)
			break;
	}

	for (; i < npage && i > 0; i--, iova -= PAGE_SIZE)
		iommu_unmap(domain->domain, iova, PAGE_SIZE);

	return ret;
}
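/*
 * Map a pinned, physically contiguous chunk into every domain attached
 * to the container, unwinding the earlier domains if any of them fails.
 */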
static int vfio_iommu_map(struct vfio_iommu *iommu, dma_addr_t iova,
			  unsigned long pfn, long npage, int prot)
{
	struct vfio_domain *d;
	int ret;

	list_for_each_entry(d, &iommu->domain_list, next) {
		ret = iommu_map(d->domain, iova, (phys_addr_t)pfn << PAGE_SHIFT,
				npage << PAGE_SHIFT, prot | d->prot);
		if (ret) {
			if (ret != -EBUSY ||
			    map_try_harder(d, iova, pfn, npage, prot))
				goto unwind;
		}
	}

	return 0;

unwind:
	list_for_each_entry_continue_reverse(d, &iommu->domain_list, next)
		iommu_unmap(d->domain, iova, npage << PAGE_SHIFT);

	return ret;
}

static int vfio_dma_do_map(struct vfio_iommu *iommu,
			   struct vfio_iommu_type1_dma_map *map)
{
	dma_addr_t iova = map->iova;
	unsigned long vaddr = map->vaddr;
	size_t size = map->size;
	long npage;
	int ret = 0, prot = 0;
	uint64_t mask;
	struct vfio_dma *dma;
	unsigned long pfn;

	/* Verify that none of our __u64 fields overflow */
	if (map->size != size || map->vaddr != vaddr || map->iova != iova)
		return -EINVAL;

	mask = ((uint64_t)1 << __ffs(vfio_pgsize_bitmap(iommu))) - 1;

	WARN_ON(mask & PAGE_MASK);

	/* READ/WRITE from device perspective */
	if (map->flags & VFIO_DMA_MAP_FLAG_WRITE)
		prot |= IOMMU_WRITE;
	if (map->flags & VFIO_DMA_MAP_FLAG_READ)
		prot |= IOMMU_READ;

	if (!prot || !size || (size | iova | vaddr) & mask)
		return -EINVAL;

	/* Don't allow IOVA or virtual address wrap */
	if (iova + size - 1 < iova || vaddr + size - 1 < vaddr)
		return -EINVAL;

	mutex_lock(&iommu->lock);

	if (vfio_find_dma(iommu, iova, size)) {
		mutex_unlock(&iommu->lock);
		return -EEXIST;
	}

	dma = kzalloc(sizeof(*dma), GFP_KERNEL);
	if (!dma) {
		mutex_unlock(&iommu->lock);
		return -ENOMEM;
	}

	dma->iova = iova;
	dma->vaddr = vaddr;
	dma->prot = prot;

	/* Insert zero-sized and grow as we map chunks of it */
	vfio_link_dma(iommu, dma);

	while (size) {
		/* Pin a contiguous chunk of memory */
		npage = vfio_pin_pages(vaddr + dma->size,
				       size >> PAGE_SHIFT, prot, &pfn);
		if (npage <= 0) {
			WARN_ON(!npage);
			ret = (int)npage;
			break;
		}

		/* Map it! */
		ret = vfio_iommu_map(iommu, iova + dma->size, pfn, npage, prot);
		if (ret) {
			vfio_unpin_pages(pfn, npage, prot, true);
			break;
		}

		size -= npage << PAGE_SHIFT;
		dma->size += npage << PAGE_SHIFT;
	}

	if (ret)
		vfio_remove_dma(iommu, dma);

	mutex_unlock(&iommu->lock);
	return ret;
}
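/*
 * iommu_group_for_each_dev() callback: every device in the group must
 * sit on the same bus_type, otherwise we can't allocate a single IOMMU
 * domain for the group.
 */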
static int vfio_bus_type(struct device *dev, void *data)
{
	struct bus_type **bus = data;

	if (*bus && *bus != dev->bus)
		return -EINVAL;

	*bus = dev->bus;

	return 0;
}
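/*
 * When a new domain joins a container that already has mappings, walk
 * the dma_list and re-create each mapping in the new domain, batching
 * physically contiguous runs into single iommu_map() calls.
 */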
static int vfio_iommu_replay(struct vfio_iommu *iommu,
			     struct vfio_domain *domain)
{
	struct vfio_domain *d;
	struct rb_node *n;
	int ret;

	/* Arbitrarily pick the first domain in the list for lookups */
	d = list_first_entry(&iommu->domain_list, struct vfio_domain, next);
	n = rb_first(&iommu->dma_list);

	/* If there's not a domain, there better not be any mappings */
	if (WARN_ON(n && !d))
		return -EINVAL;

	for (; n; n = rb_next(n)) {
		struct vfio_dma *dma;
		dma_addr_t iova;

		dma = rb_entry(n, struct vfio_dma, node);
		iova = dma->iova;

		while (iova < dma->iova + dma->size) {
			phys_addr_t phys = iommu_iova_to_phys(d->domain, iova);
			size_t size;

			if (WARN_ON(!phys)) {
				iova += PAGE_SIZE;
				continue;
			}

			size = PAGE_SIZE;

			while (iova + size < dma->iova + dma->size &&
			       phys + size == iommu_iova_to_phys(d->domain,
								 iova + size))
				size += PAGE_SIZE;

			ret = iommu_map(domain->domain, iova, phys,
					size, dma->prot | domain->prot);
			if (ret)
				return ret;

			iova += size;
		}
	}

	return 0;
}

/*
 * We change our unmap behavior slightly depending on whether the IOMMU
 * supports fine-grained superpages.  IOMMUs like AMD-Vi will use a superpage
 * for practically any contiguous power-of-two mapping we give it.  This means
 * we don't need to look for contiguous chunks ourselves to make unmapping
 * more efficient.  On IOMMUs with coarse-grained super pages, like Intel VT-d
 * with discrete 2M/1G/512G/1T superpages, identifying contiguous chunks
 * significantly boosts non-hugetlbfs mappings and doesn't seem to hurt when
 * hugetlbfs is in use.
 */
static void vfio_test_domain_fgsp(struct vfio_domain *domain)
{
	struct page *pages;
	int ret, order = get_order(PAGE_SIZE * 2);

	pages = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!pages)
		return;

	ret = iommu_map(domain->domain, 0, page_to_phys(pages), PAGE_SIZE * 2,
			IOMMU_READ | IOMMU_WRITE | domain->prot);
	if (!ret) {
		size_t unmapped = iommu_unmap(domain->domain, 0, PAGE_SIZE);

		if (unmapped == PAGE_SIZE)
			iommu_unmap(domain->domain, PAGE_SIZE, PAGE_SIZE);
		else
			domain->fgsp = true;
	}

	__free_pages(pages, order);
}

static int vfio_iommu_type1_attach_group(void *iommu_data,
					 struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_group *group, *g;
	struct vfio_domain *domain, *d;
	struct bus_type *bus = NULL;
	int ret;

	mutex_lock(&iommu->lock);

	list_for_each_entry(d, &iommu->domain_list, next) {
		list_for_each_entry(g, &d->group_list, next) {
			if (g->iommu_group != iommu_group)
				continue;

			mutex_unlock(&iommu->lock);
			return -EINVAL;
		}
	}

	group = kzalloc(sizeof(*group), GFP_KERNEL);
	domain = kzalloc(sizeof(*domain), GFP_KERNEL);
	if (!group || !domain) {
		ret = -ENOMEM;
		goto out_free;
	}

	group->iommu_group = iommu_group;

	/* Determine bus_type in order to allocate a domain */
	ret = iommu_group_for_each_dev(iommu_group, &bus, vfio_bus_type);
	if (ret)
		goto out_free;

	domain->domain = iommu_domain_alloc(bus);
	if (!domain->domain) {
		ret = -EIO;
		goto out_free;
	}

	if (iommu->nesting) {
		int attr = 1;

		ret = iommu_domain_set_attr(domain->domain, DOMAIN_ATTR_NESTING,
					    &attr);
		if (ret)
			goto out_domain;
	}

	ret = iommu_attach_group(domain->domain, iommu_group);
	if (ret)
		goto out_domain;

	INIT_LIST_HEAD(&domain->group_list);
	list_add(&group->next, &domain->group_list);

	if (!allow_unsafe_interrupts &&
	    !iommu_capable(bus, IOMMU_CAP_INTR_REMAP)) {
		pr_warn("%s: No interrupt remapping support.  Use the module param \"allow_unsafe_interrupts\" to enable VFIO IOMMU support on this platform\n",
		       __func__);
		ret = -EPERM;
		goto out_detach;
	}

	if (iommu_capable(bus, IOMMU_CAP_CACHE_COHERENCY))
		domain->prot |= IOMMU_CACHE;

	/*
	 * Try to match an existing compatible domain.  We don't want to
	 * preclude an IOMMU driver supporting multiple bus_types and being
	 * able to include different bus_types in the same IOMMU domain, so
	 * we test whether the domains use the same iommu_ops rather than
	 * testing if they're on the same bus_type.
	 */
	list_for_each_entry(d, &iommu->domain_list, next) {
		if (d->domain->ops == domain->domain->ops &&
		    d->prot == domain->prot) {
			iommu_detach_group(domain->domain, iommu_group);
			if (!iommu_attach_group(d->domain, iommu_group)) {
				list_add(&group->next, &d->group_list);
				iommu_domain_free(domain->domain);
				kfree(domain);
				mutex_unlock(&iommu->lock);
				return 0;
			}

			ret = iommu_attach_group(domain->domain, iommu_group);
			if (ret)
				goto out_domain;
		}
	}

	vfio_test_domain_fgsp(domain);

	/* replay mappings on new domains */
	ret = vfio_iommu_replay(iommu, domain);
	if (ret)
		goto out_detach;

	list_add(&domain->next, &iommu->domain_list);

	mutex_unlock(&iommu->lock);

	return 0;

out_detach:
	iommu_detach_group(domain->domain, iommu_group);
out_domain:
	iommu_domain_free(domain->domain);
out_free:
	kfree(domain);
	kfree(group);
	mutex_unlock(&iommu->lock);
	return ret;
}
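/* Tear down every mapping in the container, unpinning all pages. */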
static void vfio_iommu_unmap_unpin_all(struct vfio_iommu *iommu)
{
	struct rb_node *node;

	while ((node = rb_first(&iommu->dma_list)))
		vfio_remove_dma(iommu, rb_entry(node, struct vfio_dma, node));
}

static void vfio_iommu_type1_detach_group(void *iommu_data,
					  struct iommu_group *iommu_group)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain;
	struct vfio_group *group;

	mutex_lock(&iommu->lock);

	list_for_each_entry(domain, &iommu->domain_list, next) {
		list_for_each_entry(group, &domain->group_list, next) {
			if (group->iommu_group != iommu_group)
				continue;

			iommu_detach_group(domain->domain, iommu_group);
			list_del(&group->next);
			kfree(group);
			/*
			 * Group ownership provides privilege, if the group
			 * list is empty, the domain goes away.  If it's the
			 * last domain, then all the mappings go away too.
			 */
			if (list_empty(&domain->group_list)) {
				if (list_is_singular(&iommu->domain_list))
					vfio_iommu_unmap_unpin_all(iommu);
				iommu_domain_free(domain->domain);
				list_del(&domain->next);
				kfree(domain);
			}
			goto done;
		}
	}

done:
	mutex_unlock(&iommu->lock);
}

static void *vfio_iommu_type1_open(unsigned long arg)
{
	struct vfio_iommu *iommu;

	iommu = kzalloc(sizeof(*iommu), GFP_KERNEL);
	if (!iommu)
		return ERR_PTR(-ENOMEM);

	switch (arg) {
	case VFIO_TYPE1_IOMMU:
		break;
	case VFIO_TYPE1_NESTING_IOMMU:
		iommu->nesting = true;
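		/* fall through - a nesting IOMMU implies v2 semantics */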
	case VFIO_TYPE1v2_IOMMU:
		iommu->v2 = true;
		break;
	default:
		kfree(iommu);
		return ERR_PTR(-EINVAL);
	}

	INIT_LIST_HEAD(&iommu->domain_list);
	iommu->dma_list = RB_ROOT;
	mutex_init(&iommu->lock);

	return iommu;
}

static void vfio_iommu_type1_release(void *iommu_data)
{
	struct vfio_iommu *iommu = iommu_data;
	struct vfio_domain *domain, *domain_tmp;
	struct vfio_group *group, *group_tmp;

	vfio_iommu_unmap_unpin_all(iommu);

	list_for_each_entry_safe(domain, domain_tmp,
				 &iommu->domain_list, next) {
		list_for_each_entry_safe(group, group_tmp,
					 &domain->group_list, next) {
			iommu_detach_group(domain->domain, group->iommu_group);
			list_del(&group->next);
			kfree(group);
		}
		iommu_domain_free(domain->domain);
		list_del(&domain->next);
		kfree(domain);
	}

	kfree(iommu);
}
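/*
 * VFIO_DMA_CC_IOMMU is only advertised when every domain in the
 * container enforces cache coherency for DMA.
 */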
static int vfio_domains_have_iommu_cache(struct vfio_iommu *iommu)
{
	struct vfio_domain *domain;
	int ret = 1;

	mutex_lock(&iommu->lock);
	list_for_each_entry(domain, &iommu->domain_list, next) {
		if (!(domain->prot & IOMMU_CACHE)) {
			ret = 0;
			break;
		}
	}
	mutex_unlock(&iommu->lock);

	return ret;
}
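/*
 * For illustration, a minimal userspace sketch of driving the map path
 * below (assumes "container" is an open /dev/vfio/vfio fd with a group
 * already attached and VFIO_TYPE1_IOMMU set; "buf" and "size" are
 * hypothetical, page-aligned names):
 *
 *	struct vfio_iommu_type1_dma_map map = {
 *		.argsz = sizeof(map),
 *		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
 *		.vaddr = (__u64)(uintptr_t)buf,
 *		.iova  = 0,
 *		.size  = size,
 *	};
 *	if (ioctl(container, VFIO_IOMMU_MAP_DMA, &map))
 *		perror("VFIO_IOMMU_MAP_DMA");
 */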
static long vfio_iommu_type1_ioctl(void *iommu_data,
				   unsigned int cmd, unsigned long arg)
{
	struct vfio_iommu *iommu = iommu_data;
	unsigned long minsz;

	if (cmd == VFIO_CHECK_EXTENSION) {
		switch (arg) {
		case VFIO_TYPE1_IOMMU:
		case VFIO_TYPE1v2_IOMMU:
		case VFIO_TYPE1_NESTING_IOMMU:
			return 1;
		case VFIO_DMA_CC_IOMMU:
			if (!iommu)
				return 0;
			return vfio_domains_have_iommu_cache(iommu);
		default:
			return 0;
		}
	} else if (cmd == VFIO_IOMMU_GET_INFO) {
		struct vfio_iommu_type1_info info;

		minsz = offsetofend(struct vfio_iommu_type1_info, iova_pgsizes);

		if (copy_from_user(&info, (void __user *)arg, minsz))
			return -EFAULT;

		if (info.argsz < minsz)
			return -EINVAL;

		info.flags = 0;

		info.iova_pgsizes = vfio_pgsize_bitmap(iommu);

		return copy_to_user((void __user *)arg, &info, minsz);

	} else if (cmd == VFIO_IOMMU_MAP_DMA) {
		struct vfio_iommu_type1_dma_map map;
		uint32_t mask = VFIO_DMA_MAP_FLAG_READ |
				VFIO_DMA_MAP_FLAG_WRITE;

		minsz = offsetofend(struct vfio_iommu_type1_dma_map, size);

		if (copy_from_user(&map, (void __user *)arg, minsz))
			return -EFAULT;

		if (map.argsz < minsz || map.flags & ~mask)
			return -EINVAL;

		return vfio_dma_do_map(iommu, &map);

	} else if (cmd == VFIO_IOMMU_UNMAP_DMA) {
		struct vfio_iommu_type1_dma_unmap unmap;
		long ret;

		minsz = offsetofend(struct vfio_iommu_type1_dma_unmap, size);

		if (copy_from_user(&unmap, (void __user *)arg, minsz))
			return -EFAULT;

		if (unmap.argsz < minsz || unmap.flags)
			return -EINVAL;

		ret = vfio_dma_do_unmap(iommu, &unmap);
		if (ret)
			return ret;

		return copy_to_user((void __user *)arg, &unmap, minsz);
	}

	return -ENOTTY;
}

static const struct vfio_iommu_driver_ops vfio_iommu_driver_ops_type1 = {
	.name		= "vfio-iommu-type1",
	.owner		= THIS_MODULE,
	.open		= vfio_iommu_type1_open,
	.release	= vfio_iommu_type1_release,
	.ioctl		= vfio_iommu_type1_ioctl,
	.attach_group	= vfio_iommu_type1_attach_group,
	.detach_group	= vfio_iommu_type1_detach_group,
};

static int __init vfio_iommu_type1_init(void)
{
	return vfio_register_iommu_driver(&vfio_iommu_driver_ops_type1);
}

static void __exit vfio_iommu_type1_cleanup(void)
{
	vfio_unregister_iommu_driver(&vfio_iommu_driver_ops_type1);
}

module_init(vfio_iommu_type1_init);
module_exit(vfio_iommu_type1_cleanup);

MODULE_VERSION(DRIVER_VERSION);
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION(DRIVER_DESC);