/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

struct iommu_dma_cookie {
	struct iova_domain	iovad;
	struct list_head	msi_page_list;
	spinlock_t		msi_lock;
};

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
	return &((struct iommu_dma_cookie *)domain->iova_cookie)->iovad;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (!cookie)
		return -ENOMEM;

	spin_lock_init(&cookie->msi_lock);
	INIT_LIST_HEAD(&cookie->msi_page_list);
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
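
/*
 * Illustrative sketch (not part of this file): an IOMMU driver whose domains
 * support DMA API usage would typically pair the cookie calls with its own
 * domain allocation and teardown, along these lines (the "my_domain" wrapper
 * and its helpers are hypothetical):
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom = my_domain_create();
 *
 *		if (dom && type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			my_domain_destroy(dom);
 *			return NULL;
 *		}
 *		return dom ? &dom->domain : NULL;
 *	}
 *
 * with the matching iommu_put_dma_cookie() call made from the driver's
 * domain_free callback before the domain itself is freed.
 */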

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

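/*
 * PCI host bridge windows alias ranges of PCI bus address space: a DMA to an
 * address inside one of them may be decoded as peer-to-peer traffic towards
 * the window rather than reaching memory, so those ranges are carved out of
 * the IOVA space before any allocations can land in them.
 */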
static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long order, base_pfn, end_pfn;

	if (!iovad)
		return -ENODEV;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}

	/* All we can safely do with an existing domain is enlarge it */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn ||
		    end_pfn < iovad->dma_32bit_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		iovad->dma_32bit_pfn = end_pfn;
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
		if (dev && dev_is_pci(dev))
			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
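
/*
 * Illustrative sketch (not part of this file): arch code setting up DMA ops
 * for a device behind an IOMMU would typically initialise the domain along
 * these lines, assuming a hypothetical dma_base/size pair describing the bus
 * address range the device may use:
 *
 *	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *	if (iommu_dma_init_domain(domain, dma_base, size, dev))
 *		goto out_fallback;
 *
 * falling back to the non-IOMMU dma_ops if initialisation fails.
 */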

/**
 * dma_direction_to_prot - Translate DMA API directions to IOMMU API page flags
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_direction_to_prot(enum dma_data_direction dir, bool coherent)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
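
/*
 * Illustrative sketch (not part of this file): callers combine the translated
 * flags with any extra attributes before mapping; a streaming map_page
 * implementation might do roughly:
 *
 *	int prot = dma_direction_to_prot(dir, is_device_dma_coherent(dev));
 *
 *	return iommu_dma_map_page(dev, page, offset, size, prot);
 *
 * where is_device_dma_coherent() stands in for whatever coherency query the
 * architecture provides.
 */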

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
		size_t size, dma_addr_t dma_limit, struct device *dev)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long iova_len = size >> shift;
	struct iova *iova = NULL;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);
	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	iova = alloc_iova(iovad, iova_len, dma_limit >> shift, true);
	if (!iova)
		return 0;

	return (dma_addr_t)iova->pfn_lo << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
		dma_addr_t iova, size_t size)
{
	struct iova_domain *iovad = &cookie->iovad;
	struct iova *iova_rbnode;

	iova_rbnode = find_iova(iovad, iova_pfn(iovad, iova));
	if (WARN_ON(!iova_rbnode))
		return;

	__free_iova(iovad, iova_rbnode);
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
		size_t size)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	size_t iova_off = iova_offset(iovad, dma_addr);

	dma_addr -= iova_off;
	size = iova_align(iovad, size + iova_off);

	WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
	iommu_dma_free_iova(domain->iova_cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

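/*
 * Worked example (sketch) for the allocator below: with a 4K PAGE_SIZE and an
 * order_mask allowing orders 0 and 9 (4K and 2M IOMMU pages), a request for
 * count = 513 first attempts an order-9 allocation with __GFP_NORETRY; on
 * success that block is split_page()d into 512 individual pages and the final
 * page comes from an order-0 allocation, while on failure order 9 is dropped
 * from the mask and the whole request is satisfied page by page.
 */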
static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t iova;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	size = iova_align(iovad, size);
	iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
	if (!iova)
		goto out_free_pages;

	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = iova;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
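
/*
 * Illustrative sketch (not part of this file): a non-coherent arch
 * implementation of dma_alloc_attrs() might wrap the above as below, where
 * "arch_flush_page" is a hypothetical callback flushing one page to the point
 * of coherency and "arch_remap_pages" stands in for however the architecture
 * builds a CPU mapping of the returned page array:
 *
 *	int prot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
 *	struct page **pages;
 *
 *	pages = iommu_dma_alloc(dev, iosize, gfp, attrs, prot, handle,
 *				arch_flush_page);
 *	if (!pages)
 *		return NULL;
 *	addr = arch_remap_pages(pages, size, prot);
 *
 * with iommu_dma_free() undoing the allocation once the CPU mapping has been
 * torn down again.
 */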

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
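
/*
 * Illustrative sketch (not part of this file): an arch mmap() DMA op would
 * retrieve the page array for the buffer (how it does so is up to the
 * architecture - finding the remapped area the pages were vmapped into, for
 * instance) and then simply do:
 *
 *	return iommu_dma_mmap(pages, size, vma);
 *
 * after validating vma->vm_pgoff and the requested size itself.
 */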

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	size_t iova_off = iova_offset(iovad, phys);
	dma_addr_t iova;

	size = iova_align(iovad, size + iova_off);
	iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
	if (!iova)
		return DMA_ERROR_CODE;

	if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
		iommu_dma_free_iova(cookie, iova, size);
		return DMA_ERROR_CODE;
	}
	return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t iova;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
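
/*
 * Illustrative sketch (not part of this file): arch streaming DMA code
 * typically calls this directly from its map_sg op, translating the DMA
 * direction to IOMMU flags first, e.g.:
 *
 *	return iommu_dma_map_sg(dev, sgl, nelems,
 *			dma_direction_to_prot(dir, coherent));
 *
 * where "coherent" reflects whether the device snoops the CPU caches.
 */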

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_addr_t start, end;
	struct scatterlist *tmp;
	int i;
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	start = sg_dma_address(sg);
	for_each_sg(sg_next(sg), tmp, nents - 1, i) {
		if (sg_dma_len(tmp) == 0)
			break;
		sg = tmp;
	}
	end = sg_dma_address(sg) + sg_dma_len(sg);
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_direction_to_prot(dir, false) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	struct iova_domain *iovad = &cookie->iovad;
	dma_addr_t iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;

	msi_addr &= ~(phys_addr_t)iova_mask(iovad);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	iova = iommu_dma_alloc_iova(domain, iovad->granule, dma_get_mask(dev),
			dev);
	if (!iova)
		goto out_free_page;

	msi_page->phys = msi_addr;
	msi_page->iova = iova;
	if (iommu_map(domain, msi_page->iova, msi_addr, iovad->granule, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	iommu_dma_free_iova(cookie, iova, iovad->granule);
out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= iova_mask(&cookie->iovad);
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
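
/*
 * Illustrative sketch (not part of this file): an MSI irqchip driver sitting
 * behind an IOMMU would typically invoke this at the end of its
 * irq_compose_msi_msg callback, e.g. (the callback name is hypothetical):
 *
 *	static void my_compose_msi_msg(struct irq_data *d, struct msi_msg *msg)
 *	{
 *		msg->address_lo = ...;
 *		msg->address_hi = ...;
 *		msg->data = ...;
 *		iommu_dma_map_msi_msg(d->irq, msg);
 *	}
 *
 * so that the doorbell address the device writes to is remapped through the
 * same domain as its other DMA.
 */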