/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
	struct list_head	list;
	dma_addr_t		iova;
	phys_addr_t		phys;
};

enum iommu_dma_cookie_type {
	IOMMU_DMA_IOVA_COOKIE,
	IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
	enum iommu_dma_cookie_type	type;
	union {
		/* Full allocator for IOMMU_DMA_IOVA_COOKIE */
		struct iova_domain	iovad;
		/* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
		dma_addr_t		msi_iova;
	};
	struct list_head		msi_page_list;
	spinlock_t			msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return cookie->iovad.granule;
	return PAGE_SIZE;
}

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
		return &cookie->iovad;
	return NULL;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
	struct iommu_dma_cookie *cookie;

	cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
	if (cookie) {
		spin_lock_init(&cookie->msi_lock);
		INIT_LIST_HEAD(&cookie->msi_page_list);
		cookie->type = type;
	}
	return cookie;
}

int iommu_dma_init(void)
{
	return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
	if (domain->iova_cookie)
		return -EEXIST;

	domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
	if (!domain->iova_cookie)
		return -ENOMEM;

	return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
	struct iommu_dma_cookie *cookie;

	if (domain->type != IOMMU_DOMAIN_UNMANAGED)
		return -EINVAL;

	if (domain->iova_cookie)
		return -EEXIST;

	cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
	if (!cookie)
		return -ENOMEM;

	cookie->msi_iova = base;
	domain->iova_cookie = cookie;
	return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
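
/*
 * Illustrative sketch only: a hypothetical caller owning an
 * IOMMU_DOMAIN_UNMANAGED domain (e.g. a VFIO-style user) might set aside
 * a doorbell region like so. The base and length values are invented for
 * the example.
 *
 *	#define MY_MSI_IOVA_BASE	0x08000000
 *	#define MY_MSI_IOVA_LENGTH	0x00100000
 *
 *	ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 *	if (ret)
 *		goto out_free_domain;
 *
 * The caller's own IOVA allocator must then never hand out addresses in
 * [MY_MSI_IOVA_BASE, MY_MSI_IOVA_BASE + MY_MSI_IOVA_LENGTH).
 */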

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi, *tmp;

	if (!cookie)
		return;

	if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
		put_iova_domain(&cookie->iovad);

	list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
		list_del(&msi->list);
		kfree(msi);
	}
	kfree(cookie);
	domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
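
/*
 * Illustrative sketch of the expected driver-side pairing; the driver
 * structure and helper names here are hypothetical:
 *
 *	static struct iommu_domain *my_domain_alloc(unsigned type)
 *	{
 *		struct my_domain *dom;
 *
 *		if (type != IOMMU_DOMAIN_DMA && type != IOMMU_DOMAIN_UNMANAGED)
 *			return NULL;
 *		dom = kzalloc(sizeof(*dom), GFP_KERNEL);
 *		if (!dom)
 *			return NULL;
 *		if (type == IOMMU_DOMAIN_DMA &&
 *		    iommu_get_dma_cookie(&dom->domain)) {
 *			kfree(dom);
 *			return NULL;
 *		}
 *		return &dom->domain;
 *	}
 *
 *	static void my_domain_free(struct iommu_domain *domain)
 *	{
 *		iommu_put_dma_cookie(domain);
 *		kfree(to_my_domain(domain));
 *	}
 */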

static void iova_reserve_pci_windows(struct pci_dev *dev,
		struct iova_domain *iovad)
{
	struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
	struct resource_entry *window;
	unsigned long lo, hi;

	resource_list_for_each_entry(window, &bridge->windows) {
		if (resource_type(window->res) != IORESOURCE_MEM &&
		    resource_type(window->res) != IORESOURCE_IO)
			continue;

		lo = iova_pfn(iovad, window->res->start - window->offset);
		hi = iova_pfn(iovad, window->res->end - window->offset);
		reserve_iova(iovad, lo, hi);
	}
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
		u64 size, struct device *dev)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iova_domain *iovad = &cookie->iovad;
	unsigned long order, base_pfn, end_pfn;
	bool pci = dev && dev_is_pci(dev);

	if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
		return -EINVAL;

	/* Use the smallest supported page size for IOVA granularity */
	order = __ffs(domain->pgsize_bitmap);
	base_pfn = max_t(unsigned long, 1, base >> order);
	end_pfn = (base + size - 1) >> order;

	/* Check the domain allows at least some access to the device... */
	if (domain->geometry.force_aperture) {
		if (base > domain->geometry.aperture_end ||
		    base + size <= domain->geometry.aperture_start) {
			pr_warn("specified DMA range outside IOMMU capability\n");
			return -EFAULT;
		}
		/* ...then finally give it a kicking to make sure it fits */
		base_pfn = max_t(unsigned long, base_pfn,
				domain->geometry.aperture_start >> order);
		end_pfn = min_t(unsigned long, end_pfn,
				domain->geometry.aperture_end >> order);
	}
	/*
	 * PCI devices may have larger DMA masks, but still prefer allocating
	 * within a 32-bit mask to avoid DAC addressing. Such limitations don't
	 * apply to the typical platform device, so for those we may as well
	 * leave the cache limit at the top of their range to save an rb_last()
	 * traversal on every allocation.
	 */
	if (pci)
		end_pfn &= DMA_BIT_MASK(32) >> order;

	/* start_pfn is always nonzero for an already-initialised domain */
	if (iovad->start_pfn) {
		if (1UL << order != iovad->granule ||
		    base_pfn != iovad->start_pfn) {
			pr_warn("Incompatible range for DMA domain\n");
			return -EFAULT;
		}
		/*
		 * If we have devices with different DMA masks, move the free
		 * area cache limit down for the benefit of the smaller one.
		 */
		iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);
	} else {
		init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
		if (pci)
			iova_reserve_pci_windows(to_pci_dev(dev), iovad);
	}
	return 0;
}
EXPORT_SYMBOL(iommu_dma_init_domain);
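
/*
 * Illustrative sketch of the expected arch-side call, loosely modelled on
 * what an arch's dma_ops setup might do once a device is attached to a DMA
 * domain; deriving the range from the DMA mask is an assumption made for
 * the example, and both values are granule-aligned as documented above:
 *
 *	u64 dma_base = 0, size = 1ULL << 32;	// e.g. from a 32-bit mask
 *
 *	if (iommu_dma_init_domain(domain, dma_base, size, dev)) {
 *		pr_warn("Failed to initialise domain for %s\n",
 *			dev_name(dev));
 *		// fall back to the default, non-IOMMU dma_ops
 *	}
 */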

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
		     unsigned long attrs)
{
	int prot = coherent ? IOMMU_CACHE : 0;

	if (attrs & DMA_ATTR_PRIVILEGED)
		prot |= IOMMU_PRIV;

	switch (dir) {
	case DMA_BIDIRECTIONAL:
		return prot | IOMMU_READ | IOMMU_WRITE;
	case DMA_TO_DEVICE:
		return prot | IOMMU_READ;
	case DMA_FROM_DEVICE:
		return prot | IOMMU_WRITE;
	default:
		return 0;
	}
}
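
/*
 * For example, a cache-coherent device reading a buffer out of memory
 * (DMA_TO_DEVICE) with no special attributes gets:
 *
 *	dma_info_to_prot(DMA_TO_DEVICE, true, 0)
 *		== IOMMU_READ | IOMMU_CACHE
 *
 * i.e. the device may only read through the mapping, and may snoop the
 * CPU caches while doing so.
 */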

static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
		dma_addr_t dma_limit)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long length = iova_align(iovad, size) >> shift;

	if (domain->geometry.force_aperture)
		dma_limit = min(dma_limit, domain->geometry.aperture_end);
	/*
	 * Enforce size-alignment to be safe - there could perhaps be an
	 * attribute to control this per-device, or at least per-domain...
	 */
	return alloc_iova(iovad, length, dma_limit >> shift, true);
}

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
	struct iova_domain *iovad = cookie_iovad(domain);
	unsigned long shift = iova_shift(iovad);
	unsigned long pfn = dma_addr >> shift;
	struct iova *iova = find_iova(iovad, pfn);
	size_t size;

	if (WARN_ON(!iova))
		return;

	size = iova_size(iova) << shift;
	size -= iommu_unmap(domain, pfn << shift, size);
	/* ...and if we can't, then something is horribly, horribly wrong */
	WARN_ON(size > 0);
	__free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
	while (count--)
		__free_page(pages[count]);
	kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
		unsigned long order_mask, gfp_t gfp)
{
	struct page **pages;
	unsigned int i = 0, array_size = count * sizeof(*pages);

	order_mask &= (2U << MAX_ORDER) - 1;
	if (!order_mask)
		return NULL;

	if (array_size <= PAGE_SIZE)
		pages = kzalloc(array_size, GFP_KERNEL);
	else
		pages = vzalloc(array_size);
	if (!pages)
		return NULL;

	/* IOMMU can map any pages, so highmem can also be used here */
	gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

	while (count) {
		struct page *page = NULL;
		unsigned int order_size;

		/*
		 * Higher-order allocations are a convenience rather
		 * than a necessity, hence using __GFP_NORETRY until
		 * falling back to minimum-order allocations.
		 */
		for (order_mask &= (2U << __fls(count)) - 1;
		     order_mask; order_mask &= ~order_size) {
			unsigned int order = __fls(order_mask);

			order_size = 1U << order;
			page = alloc_pages((order_mask - order_size) ?
					   gfp | __GFP_NORETRY : gfp, order);
			if (!page)
				continue;
			if (!order)
				break;
			if (!PageCompound(page)) {
				split_page(page, order);
				break;
			} else if (!split_huge_page(page)) {
				break;
			}
			__free_pages(page, order);
		}
		if (!page) {
			__iommu_dma_free_pages(pages, i);
			return NULL;
		}
		count -= order_size;
		while (order_size--)
			pages[i++] = page++;
	}
	return pages;
}
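
/*
 * Worked example of the allocation loop above (illustrative numbers): for
 * count == 528 pages with alloc_sizes covering {4K, 64K, 2M} (order_mask
 * bits 0, 4 and 9 with 4K pages), the first pass tries an order-9 block
 * with __GFP_NORETRY. If that succeeds, 512 pages are banked and the next
 * pass masks order_mask down to orders <= __fls(16), so the remaining 16
 * pages come from one order-4 block, or failing that, single pages.
 */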

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
		dma_addr_t *handle)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
	__iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
	*handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *	 attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *		given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *	   or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
		unsigned long attrs, int prot, dma_addr_t *handle,
		void (*flush_page)(struct device *, const void *, phys_addr_t))
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	struct iova *iova;
	struct page **pages;
	struct sg_table sgt;
	dma_addr_t dma_addr;
	unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

	*handle = DMA_ERROR_CODE;

	min_size = alloc_sizes & -alloc_sizes;
	if (min_size < PAGE_SIZE) {
		min_size = PAGE_SIZE;
		alloc_sizes |= PAGE_SIZE;
	} else {
		size = ALIGN(size, min_size);
	}
	if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
		alloc_sizes = min_size;

	count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
	if (!pages)
		return NULL;

	iova = __alloc_iova(domain, size, dev->coherent_dma_mask);
	if (!iova)
		goto out_free_pages;

	size = iova_align(iovad, size);
	if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
		goto out_free_iova;

	if (!(prot & IOMMU_CACHE)) {
		struct sg_mapping_iter miter;
		/*
		 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
		 * sufficient here, so skip it by using the "wrong" direction.
		 */
		sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
		while (sg_miter_next(&miter))
			flush_page(dev, miter.addr, page_to_phys(miter.page));
		sg_miter_stop(&miter);
	}

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
			< size)
		goto out_free_sg;

	*handle = dma_addr;
	sg_free_table(&sgt);
	return pages;

out_free_sg:
	sg_free_table(&sgt);
out_free_iova:
	__free_iova(iovad, iova);
out_free_pages:
	__iommu_dma_free_pages(pages, count);
	return NULL;
}
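
/*
 * Illustrative sketch of the expected arch-side usage for a non-coherent
 * device; the flush callback and the remapping step are assumptions
 * modelled on typical arch glue, not prescribed by this file:
 *
 *	static void my_flush_page(struct device *dev, const void *virt,
 *				  phys_addr_t phys)
 *	{
 *		// clean the page to the point of coherency, e.g. via the
 *		// arch's cache maintenance helpers
 *	}
 *
 *	pages = iommu_dma_alloc(dev, size, gfp, attrs,
 *			dma_info_to_prot(DMA_BIDIRECTIONAL, false, attrs),
 *			handle, my_flush_page);
 *	if (!pages)
 *		return NULL;
 *
 * The arch would then remap @pages into a contiguous kernel VA (e.g. with
 * vmap()) and hand that to the caller; iommu_dma_free() undoes the rest.
 */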

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
	unsigned long uaddr = vma->vm_start;
	unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	int ret = -ENXIO;

	for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
		ret = vm_insert_page(vma, uaddr, pages[i]);
		if (ret)
			break;
		uaddr += PAGE_SIZE;
	}
	return ret;
}
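
/*
 * Illustrative sketch of a caller (a hypothetical arch mmap hook), showing
 * the size and protection checks that are the caller's responsibility;
 * arch_dma_pgprot() is an assumed helper, not a real API:
 *
 *	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *
 *	if (vma->vm_pgoff >= nr_pages ||
 *	    vma_pages(vma) > nr_pages - vma->vm_pgoff)
 *		return -ENXIO;
 *	vma->vm_page_prot = arch_dma_pgprot(vma->vm_page_prot);
 *	return iommu_dma_mmap(pages, size, vma);
 */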

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
		size_t size, int prot)
{
	dma_addr_t dma_addr;
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	size_t iova_off = iova_offset(iovad, phys);
	size_t len = iova_align(iovad, size + iova_off);
	struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev));

	if (!iova)
		return DMA_ERROR_CODE;

	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
		__free_iova(iovad, iova);
		return DMA_ERROR_CODE;
	}
	return dma_addr + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, int prot)
{
	return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
		dma_addr_t dma_addr)
{
	struct scatterlist *s, *cur = sg;
	unsigned long seg_mask = dma_get_seg_boundary(dev);
	unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
	int i, count = 0;

	for_each_sg(sg, s, nents, i) {
		/* Restore this segment's original unaligned fields first */
		unsigned int s_iova_off = sg_dma_address(s);
		unsigned int s_length = sg_dma_len(s);
		unsigned int s_iova_len = s->length;

		s->offset += s_iova_off;
		s->length = s_length;
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;

		/*
		 * Now fill in the real DMA data. If...
		 * - there is a valid output segment to append to
		 * - and this segment starts on an IOVA page boundary
		 * - but doesn't fall at a segment boundary
		 * - and wouldn't make the resulting output segment too long
		 */
		if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
		    (cur_len + s_length <= max_len)) {
			/* ...then concatenate it with the previous one */
			cur_len += s_length;
		} else {
			/* Otherwise start the next output segment */
			if (i > 0)
				cur = sg_next(cur);
			cur_len = s_length;
			count++;

			sg_dma_address(cur) = dma_addr + s_iova_off;
		}

		sg_dma_len(cur) = cur_len;
		dma_addr += s_iova_len;

		if (s_length + s_iova_off < s_iova_len)
			cur_len = 0;
	}
	return count;
}
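
/*
 * Worked example (illustrative addresses): two 4K segments mapped back to
 * back from IOVA 0x10000 merge into a single 8K DMA segment at 0x10000,
 * because when the second segment is considered, dma_addr (0x11000) is not
 * on a boundary (dma_addr & seg_mask is nonzero for a 64K seg_mask) and 8K
 * fits within max_len. With a 4K segment boundary mask, 0x11000 & 0xfff
 * would be zero, so the two would stay separate instead.
 */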

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (sg_dma_address(s) != DMA_ERROR_CODE)
			s->offset += sg_dma_address(s);
		if (sg_dma_len(s))
			s->length = sg_dma_len(s);
		sg_dma_address(s) = DMA_ERROR_CODE;
		sg_dma_len(s) = 0;
	}
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
		int nents, int prot)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iova_domain *iovad = cookie_iovad(domain);
	struct iova *iova;
	struct scatterlist *s, *prev = NULL;
	dma_addr_t dma_addr;
	size_t iova_len = 0;
	unsigned long mask = dma_get_seg_boundary(dev);
	int i;

	/*
	 * Work out how much IOVA space we need, and align the segments to
	 * IOVA granules for the IOMMU driver to handle. With some clever
	 * trickery we can modify the list in-place, but reversibly, by
	 * stashing the unaligned parts in the as-yet-unused DMA fields.
	 */
	for_each_sg(sg, s, nents, i) {
		size_t s_iova_off = iova_offset(iovad, s->offset);
		size_t s_length = s->length;
		size_t pad_len = (mask - iova_len + 1) & mask;

		sg_dma_address(s) = s_iova_off;
		sg_dma_len(s) = s_length;
		s->offset -= s_iova_off;
		s_length = iova_align(iovad, s_length + s_iova_off);
		s->length = s_length;

		/*
		 * Due to the alignment of our single IOVA allocation, we can
		 * depend on these assumptions about the segment boundary mask:
		 * - If mask size >= IOVA size, then the IOVA range cannot
		 *   possibly fall across a boundary, so we don't care.
		 * - If mask size < IOVA size, then the IOVA range must start
		 *   exactly on a boundary, therefore we can lay things out
		 *   based purely on segment lengths without needing to know
		 *   the actual addresses beforehand.
		 * - The mask must be a power of 2, so pad_len == 0 if
		 *   iova_len == 0, thus we cannot dereference prev the first
		 *   time through here (i.e. before it has a meaningful value).
		 */
		if (pad_len && pad_len < s_length - 1) {
			prev->length += pad_len;
			iova_len += pad_len;
		}

		iova_len += s_length;
		prev = s;
	}

	iova = __alloc_iova(domain, iova_len, dma_get_mask(dev));
	if (!iova)
		goto out_restore_sg;

	/*
	 * We'll leave any physical concatenation to the IOMMU driver's
	 * implementation - it knows better than we do.
	 */
	dma_addr = iova_dma_addr(iovad, iova);
	if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
		goto out_free_iova;

	return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
	__free_iova(iovad, iova);
out_restore_sg:
	__invalidate_sg(sg, nents);
	return 0;
}
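
/*
 * Worked example of the padding logic above (illustrative numbers): with a
 * 64K segment boundary (mask == 0xffff) and 60K of IOVA already accounted
 * for, a subsequent 8K segment would straddle the boundary. pad_len comes
 * out as 4K (the distance to the next boundary), which is added to the
 * previous segment's length so that the 8K segment starts exactly on the
 * 64K boundary, as the layout assumptions above require.
 */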

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir, unsigned long attrs)
{
	/*
	 * The scatterlist segments are mapped into a single
	 * contiguous IOVA allocation, so this is incredibly easy.
	 */
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return __iommu_dma_map(dev, phys, size,
			dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	__iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

int iommu_dma_supported(struct device *dev, u64 mask)
{
	/*
	 * 'Special' IOMMUs which don't have the same addressing capability
	 * as the CPU will have to wait until we have some way to query that
	 * before they'll be able to use this framework.
	 */
	return 1;
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
		phys_addr_t msi_addr, struct iommu_domain *domain)
{
	struct iommu_dma_cookie *cookie = domain->iova_cookie;
	struct iommu_dma_msi_page *msi_page;
	struct iova_domain *iovad = cookie_iovad(domain);
	struct iova *iova;
	int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
	size_t size = cookie_msi_granule(cookie);

	msi_addr &= ~(phys_addr_t)(size - 1);
	list_for_each_entry(msi_page, &cookie->msi_page_list, list)
		if (msi_page->phys == msi_addr)
			return msi_page;

	msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
	if (!msi_page)
		return NULL;

	msi_page->phys = msi_addr;
	if (iovad) {
		iova = __alloc_iova(domain, size, dma_get_mask(dev));
		if (!iova)
			goto out_free_page;
		msi_page->iova = iova_dma_addr(iovad, iova);
	} else {
		msi_page->iova = cookie->msi_iova;
		cookie->msi_iova += size;
	}

	if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
		goto out_free_iova;

	INIT_LIST_HEAD(&msi_page->list);
	list_add(&msi_page->list, &cookie->msi_page_list);
	return msi_page;

out_free_iova:
	if (iovad)
		__free_iova(iovad, iova);
	else
		cookie->msi_iova -= size;
out_free_page:
	kfree(msi_page);
	return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
	struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
	struct iommu_dma_cookie *cookie;
	struct iommu_dma_msi_page *msi_page;
	phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
	unsigned long flags;

	if (!domain || !domain->iova_cookie)
		return;

	cookie = domain->iova_cookie;

	/*
	 * We disable IRQs to rule out a possible inversion against
	 * irq_desc_lock if, say, someone tries to retarget the affinity
	 * of an MSI from within an IPI handler.
	 */
	spin_lock_irqsave(&cookie->msi_lock, flags);
	msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
	spin_unlock_irqrestore(&cookie->msi_lock, flags);

	if (WARN_ON(!msi_page)) {
		/*
		 * We're called from a void callback, so the best we can do is
		 * 'fail' by filling the message with obviously bogus values.
		 * Since we got this far due to an IOMMU being present, it's
		 * not like the existing address would have worked anyway...
		 */
		msg->address_hi = ~0U;
		msg->address_lo = ~0U;
		msg->data = ~0U;
	} else {
		msg->address_hi = upper_32_bits(msi_page->iova);
		msg->address_lo &= cookie_msi_granule(cookie) - 1;
		msg->address_lo += lower_32_bits(msi_page->iova);
	}
}
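
/*
 * Worked example of the address rewriting above (illustrative values): for
 * a doorbell at physical address 0x12345678 with a 4K granule, the page at
 * 0x12345000 might be mapped at IOVA 0x08005000. The message then keeps
 * its in-page offset but swaps in the IOVA page, becoming address_hi = 0,
 * address_lo = 0x08005678: that is what the device will write to, and
 * what the IOMMU will translate back to the real doorbell.
 */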