/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

struct iommu_dma_cookie {
	struct iova_domain	iovad;
	struct list_head	msi_page_list;
	spinlock_t		msi_lock;
};

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie)
		return -ENOMEM;

	spin_lock_init(&cookie->msi_lock);
	INIT_LIST_HEAD(&cookie->msi_page_list);
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

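/*
 * For illustration only (a hedged sketch, not part of this file): an IOMMU
 * driver's domain_alloc callback might attach a cookie like so. The
 * "my_domain" structure and helper names are hypothetical.
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom;
 *
 *		dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *		if (!dom)
 *			return NULL;
 *
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 */
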
/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

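/*
 * Correspondingly (again a hypothetical sketch), the driver's domain_free
 * callback would release the cookie before freeing its own state:
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));	(hypothetical container helper)
 *	}
 */
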
static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
		if (dev && dev_is_pci(dev))
			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);

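/*
 * For illustration (a hedged sketch, not arch code from this tree): the
 * architecture's DMA setup path might initialise a default domain to cover
 * a device's addressable range, with dma_base and size derived from
 * firmware or the device's DMA mask:
 *
 *	if (iommu_dma_init_domain(domain, dma_base, size, dev))
 *		pr_warn("failed to initialise DMA domain for %s\n",
 *			dev_name(dev));
 */
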
/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}

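/*
 * Typical usage (an illustrative sketch): an arch dma_map_ops implementation
 * would translate the caller's direction before handing off to
 * iommu_dma_map_page() below; "arch_map_page" is a hypothetical name, and
 * the coherency query is arch-specific.
 *
 *	static dma_addr_t arch_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, unsigned long attrs)
 *	{
 *		int prot = dma_direction_to_prot(dir, dev_is_coherent(dev));
 *
 *		return iommu_dma_map_page(dev, page, offset, size, prot);
 *	}
 */
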
static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = size >> shift;
	unsigned long iova = 0;

	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift = iova_shift(iovad);

	free_iova_fast(iovad, iova >> shift, size >> shift);
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(domain->iova_cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

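/*
 * A worked example of the loop above (illustrative numbers): asked for
 * count = 7 pages with order_mask permitting orders {0, 2} (i.e. 4K and
 * 16K chunks with 4K pages), the allocator first tries order 2 and, on
 * success, consumes 4 pages; the next iteration clamps the mask to
 * __fls(3), so the remaining 3 pages are satisfied by three order-0
 * allocations. If an order-2 attempt fails, that order is simply dropped
 * from the mask and smaller chunks are tried rather than retrying hard.
 */
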
/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}

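/*
 * Sketch of a caller (hypothetical arch glue, shown for illustration): a
 * non-coherent dma_alloc implementation would supply a cache-maintenance
 * callback, then map the returned page array into the CPU's address space
 * by whatever means the architecture uses:
 *
 *	static void flush_page(struct device *dev, const void *virt,
 *			       phys_addr_t phys)
 *	{
 *		...	(arch-specific: make PAGE_SIZE bytes visible to dev)
 *	}
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs,
 *			dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent),
 *			handle, flush_page);
 */
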
/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

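/*
 * A hypothetical arch mmap hook (sketch only) would recover the page array
 * saved at allocation time and defer to iommu_dma_mmap(), having already set
 * vma->vm_page_prot appropriately; "arch_lookup_pages" is an invented name
 * for whatever bookkeeping the architecture keeps per buffer.
 *
 *	static int arch_iommu_mmap(struct device *dev,
 *			struct vm_area_struct *vma, void *cpu_addr,
 *			dma_addr_t handle, size_t size, unsigned long attrs)
 *	{
 *		struct page **pages = arch_lookup_pages(cpu_addr);
 *
 *		return pages ? iommu_dma_mmap(pages, size, vma) : -ENXIO;
 *	}
 */
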
static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);
	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_ERROR_CODE;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_ERROR_CODE;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

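/*
 * For example (illustrative numbers): with a 4K IOVA granule, the default
 * 4G segment boundary mask and a 64K max segment size, two page-aligned 4K
 * entries mapped back-to-back at IOVA 0xf0000000 come out of the loop above
 * as a single 8K DMA segment at 0xf0000000 with count == 1, since the second
 * entry starts on a granule boundary and the combined length stays within
 * both limits.
 */
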
/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}

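/*
 * Usage sketch (hypothetical arch glue, for illustration): a dma_map_ops
 * .map_sg implementation would perform any cache maintenance for
 * non-coherent devices first, then hand the list over wholesale:
 *
 *	static int arch_map_sg(struct device *dev, struct scatterlist *sgl,
 *			int nents, enum dma_data_direction dir,
 *			unsigned long attrs)
 *	{
 *		bool coherent = dev_is_coherent(dev);	(arch-specific query)
 *
 *		return iommu_dma_map_sg(dev, sgl, nents,
 *					dma_direction_to_prot(dir, coherent));
 *	}
 */
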
void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_direction_to_prot(dir, false) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, iovad->granule, dma_get_mask(dev),
			dev);
	if (!iova)
		goto out_free_page;

	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iovad->granule);
out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= iova_mask(&cookie->iovad);
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
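
/*
 * MSI irqchip drivers hook this in from their irq_compose_msi_msg path, for
 * example (a hedged sketch; the callback and "doorbell" names are
 * hypothetical):
 *
 *	static void my_irq_compose_msi_msg(struct irq_data *data,
 *					   struct msi_msg *msg)
 *	{
 *		msg->address_hi = upper_32_bits(doorbell);
 *		msg->address_lo = lower_32_bits(doorbell);
 *		msg->data = data->hwirq;
 *
 *		iommu_dma_map_msi_msg(data->irq, msg);
 *	}
 *
 * so that a device behind an IOMMU writes its MSI to the remapped IOVA
 * rather than to the physical doorbell address.
 */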