/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

struct iommu_dma_cookie {
	struct iova_domain	iovad;
	struct list_head	msi_page_list;
	spinlock_t		msi_lock;
};

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie)
		return -ENOMEM;

	spin_lock_init(&cookie->msi_lock);
	INIT_LIST_HEAD(&cookie->msi_page_list);
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
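
/*
 * Illustrative only: a minimal sketch (not part of this file) of how an
 * IOMMU driver might pair the two cookie calls above in its domain
 * lifecycle. "struct my_domain", my_domain_alloc() and my_domain_free()
 * are hypothetical names standing in for a real driver's equivalents:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *my_dom;
 *
 *		if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
 *			return NULL;
 *		my_dom = kzalloc(sizeof(*my_dom), GFP_KERNEL);
 *		if (!my_dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&my_dom->domain)) {
 *			kfree(my_dom);
 *			return NULL;
 *		}
 *		return &my_dom->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(container_of(domain, struct my_domain, domain));
 *	}
 */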

static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
		if (dev && dev_is_pci(dev))
			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
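
/*
 * Illustrative only: a rough sketch of the arch-side call that would
 * typically follow attaching a device to a DMA domain. "dma_base" and
 * "size" stand for whatever IOVA window the arch has chosen; on success
 * the arch would then install its IOMMU-backed dma_map_ops for the device
 * (exactly how is arch-specific):
 *
 *	if (iommu_dma_init_domain(domain, dma_base, size, dev)) {
 *		pr_warn("Failed to initialise DMA domain for %s\n",
 *			dev_name(dev));
 *		iommu_detach_device(domain, dev);
 *	}
 */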

/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
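
/*
 * For example, mapping a buffer that a non-coherent device will only write
 * to would use dma_direction_to_prot(DMA_FROM_DEVICE, false), i.e. plain
 * IOMMU_WRITE, whereas the same mapping for a cache-coherent device would
 * additionally gain IOMMU_CACHE.
 */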

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = size >> shift;
	unsigned long iova = 0;
	dma_addr_t limit;

	/*
	 * Freeing non-power-of-two-sized allocations back into the IOVA caches
	 * will come back to bite us badly, so we have to waste a bit of space
	 * rounding up anything cacheable to make sure that can't happen. The
	 * order of the unadjusted size will still match upon freeing.
	 */
	if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
		iova_len = roundup_pow_of_two(iova_len);

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);

	/*
	 * Ensure iova is within range specified in iommu_dma_init_domain().
	 * This also prevents unnecessary work iterating through the entire
	 * rb_tree.
	 */
	limit = min_t(dma_addr_t, dma_limit >> shift, iovad->dma_32bit_pfn);
	iova = alloc_iova_fast(iovad, iova_len, limit);

	return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long shift = iova_shift(iovad);

	free_iova_fast(iovad, iova >> shift, size >> shift);
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(domain->iova_cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
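
/*
 * Illustrative only: a sketch of how an arch dma_map_ops ->alloc callback
 * might use the above for a non-coherent device. "arch_flush_page" stands
 * in for whatever cache maintenance helper the arch provides, and the usual
 * (dev, size, handle, gfp, attrs) parameters are assumed to be in scope:
 *
 *	struct page **pages;
 *	void *vaddr;
 *	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, false);
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs, ioprot, handle,
 *				arch_flush_page);
 *	if (!pages)
 *		return NULL;
 *	vaddr = vmap(pages, PAGE_ALIGN(size) >> PAGE_SHIFT, VM_MAP,
 *		     pgprot_writecombine(PAGE_KERNEL));
 *	if (!vaddr)
 *		iommu_dma_free(dev, pages, size, handle);
 *	return vaddr;
 */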

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);
	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_ERROR_CODE;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_ERROR_CODE;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
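
/*
 * Worked example (illustrative): with a 4K IOVA granule, an input list of
 * { offset 0x200, length 0x600 } followed by { offset 0x0, length 0x1000 }
 * is padded out by iommu_dma_map_sg() below to occupy 0x2000 of IOVA space.
 * __finalise_sg() then hands back DMA addresses iova + 0x200 (length 0x600)
 * and iova + 0x1000 (length 0x1000); because the first segment does not
 * fill its IOVA page, cur_len is reset and the two are not concatenated.
 */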

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
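
/*
 * Illustrative only: the arch dma_map_ops ->map_sg glue around this is
 * typically a one-liner along these lines; is_device_dma_coherent() is the
 * arm64 helper, and other callers would substitute their own notion of
 * coherency:
 *
 *	return iommu_dma_map_sg(dev, sgl, nents,
 *			dma_direction_to_prot(dir, is_device_dma_coherent(dev)));
 */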

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_direction_to_prot(dir, false) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, iovad->granule, dma_get_mask(dev),
			dev);
	if (!iova)
		goto out_free_page;

	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iovad->granule);
out_free_page:
	kfree(msi_page);
	return NULL;
}
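
/*
 * Illustrative only: an MSI controller driver would normally compose its
 * message with the physical doorbell address and then let
 * iommu_dma_map_msi_msg() below rewrite it, along these lines
 * ("doorbell_phys" and the function name are hypothetical):
 *
 *	static void my_irq_compose_msi_msg(struct irq_data *d,
 *					   struct msi_msg *msg)
 *	{
 *		msg->address_hi = upper_32_bits(doorbell_phys);
 *		msg->address_lo = lower_32_bits(doorbell_phys);
 *		msg->data = d->hwirq;
 *		iommu_dma_map_msi_msg(d->irq, msg);
 *	}
 */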

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= iova_mask(&cookie->iovad);
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
763}