/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain      iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;
        spinlock_t                      msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static inline struct iova_domain *cookie_iovad(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return &cookie->iovad;
        return NULL;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                spin_lock_init(&cookie->msi_lock);
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}

int iommu_dma_init(void)
{
        return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
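
/*
 * Illustrative sketch (not part of this file): a hypothetical IOMMU driver's
 * domain_alloc callback acquiring the DMA cookie for DMA-API domains, as the
 * kernel-doc above describes. The "foo_" names are assumptions for the
 * example only.
 *
 *      static struct iommu_domain *foo_domain_alloc(unsigned type)
 *      {
 *              struct foo_domain *fd = kzalloc(sizeof(*fd), GFP_KERNEL);
 *
 *              if (!fd)
 *                      return NULL;
 *              if (type == IOMMU_DOMAIN_DMA &&
 *                  iommu_get_dma_cookie(&fd->domain)) {
 *                      kfree(fd);
 *                      return NULL;
 *              }
 *              return &fd->domain;
 *      }
 */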

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
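
/*
 * Illustrative sketch (assumption, not taken from this file): a caller that
 * manages its own IOVA space, such as a VFIO-style driver, might carve out a
 * software MSI region and hand it to the helper above. The base and length
 * values are arbitrary example numbers.
 *
 *      #define FOO_MSI_IOVA_BASE       0x8000000
 *      #define FOO_MSI_IOVA_LENGTH     SZ_1M
 *
 *      domain = iommu_domain_alloc(bus);
 *      if (domain && iommu_get_msi_cookie(domain, FOO_MSI_IOVA_BASE))
 *              goto out_free_domain;
 *      // ...then map/unmap the rest of the IOVA space directly,
 *      //    keeping FOO_MSI_IOVA_LENGTH bytes at the base untouched...
 */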

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);
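
/*
 * Illustrative sketch (assumption): the matching domain_free callback of the
 * hypothetical driver sketched earlier. Calling iommu_put_dma_cookie() on a
 * domain without a cookie is safe, so no domain-type check is needed here.
 *
 *      static void foo_domain_free(struct iommu_domain *domain)
 *      {
 *              struct foo_domain *fd = to_foo_domain(domain);   // assumed helper
 *
 *              iommu_put_dma_cookie(domain);
 *              kfree(fd);
 *      }
 */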

static void iova_reserve_pci_windows(struct pci_dev *dev,
                struct iova_domain *iovad)
{
        struct pci_host_bridge *bridge = pci_find_host_bridge(dev->bus);
        struct resource_entry *window;
        unsigned long lo, hi;

        resource_list_for_each_entry(window, &bridge->windows) {
                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                lo = iova_pfn(iovad, window->res->start - window->offset);
                hi = iova_pfn(iovad, window->res->end - window->offset);
                reserve_iova(iovad, lo, hi);
        }
}

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return -ENOMEM;

        for (i = 0; i < num_pages; i++) {
                msi_page[i].phys = start;
                msi_page[i].iova = start;
                INIT_LIST_HEAD(&msi_page[i].list);
                list_add(&msi_page[i].list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        if (dev_is_pci(dev))
                iova_reserve_pci_windows(to_pci_dev(dev), iovad);

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long order, base_pfn, end_pfn;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);
        end_pfn = (base + size - 1) >> order;

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
                end_pfn = min_t(unsigned long, end_pfn,
                                domain->geometry.aperture_end >> order);
        }
        /*
         * PCI devices may have larger DMA masks, but still prefer allocating
         * within a 32-bit mask to avoid DAC addressing. Such limitations don't
         * apply to the typical platform device, so for those we may as well
         * leave the cache limit at the top of their range to save an rb_last()
         * traversal on every allocation.
         */
        if (dev && dev_is_pci(dev))
                end_pfn &= DMA_BIT_MASK(32) >> order;

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }
                /*
                 * If we have devices with different DMA masks, move the free
                 * area cache limit down for the benefit of the smaller one.
                 */
                iovad->dma_32bit_pfn = min(end_pfn, iovad->dma_32bit_pfn);

                return 0;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
        if (!dev)
                return 0;

        return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
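
/*
 * Illustrative sketch (assumption): how arch code wiring up the DMA API
 * might initialise the domain for a newly-probed device, sized to cover a
 * 32-bit DMA range starting at IOVA 0. The surrounding setup and error
 * handling are elided, and the chosen base/size are example values only.
 *
 *      struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *      u64 dma_base = 0, size = 1ULL << 32;
 *
 *      if (iommu_dma_init_domain(domain, dma_base, size, dev))
 *              pr_warn("failed to initialise DMA domain for %s\n",
 *                      dev_name(dev));
 */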

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                     unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}
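
/*
 * Worked example for the translation above (informative only): a coherent
 * device receiving data, dma_info_to_prot(DMA_FROM_DEVICE, true, 0), yields
 * IOMMU_CACHE | IOMMU_WRITE; a privileged, non-coherent transfer towards the
 * device, dma_info_to_prot(DMA_TO_DEVICE, false, DMA_ATTR_PRIVILEGED),
 * yields IOMMU_PRIV | IOMMU_READ.
 */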

static struct iova *__alloc_iova(struct iommu_domain *domain, size_t size,
                dma_addr_t dma_limit, struct device *dev)
{
        struct iova_domain *iovad = cookie_iovad(domain);
        unsigned long shift = iova_shift(iovad);
        unsigned long length = iova_align(iovad, size) >> shift;
        struct iova *iova = NULL;

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova(iovad, length, DMA_BIT_MASK(32) >> shift,
                                  true);
        /*
         * Enforce size-alignment to be safe - there could perhaps be an
         * attribute to control this per-device, or at least per-domain...
         */
        if (!iova)
                iova = alloc_iova(iovad, length, dma_limit >> shift, true);

        return iova;
}
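
/*
 * Informal example of the policy above (not additional code): a PCI endpoint
 * with dma_limit = DMA_BIT_MASK(48) is first offered an IOVA below 4GB so it
 * can use single address cycles; only if that region is exhausted does the
 * allocator retry against the full 48-bit limit. Platform devices, and PCI
 * devices limited to 32 bits anyway, go straight to the second attempt.
 */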

/* The IOVA allocator knows what we mapped, so just unmap whatever that was */
static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr)
{
        struct iova_domain *iovad = cookie_iovad(domain);
        unsigned long shift = iova_shift(iovad);
        unsigned long pfn = dma_addr >> shift;
        struct iova *iova = find_iova(iovad, pfn);
        size_t size;

        if (WARN_ON(!iova))
                return;

        size = iova_size(iova) << shift;
        size -= iommu_unmap(domain, pfn << shift, size);
        /* ...and if we can't, then something is horribly, horribly wrong */
        WARN_ON(size > 0);
        __free_iova(iovad, iova);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
                unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, array_size = count * sizeof(*pages);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        if (array_size <= PAGE_SIZE)
                pages = kzalloc(array_size, GFP_KERNEL);
        else
                pages = vzalloc(array_size);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);

                        order_size = 1U << order;
                        page = alloc_pages((order_mask - order_size) ?
                                           gfp | __GFP_NORETRY : gfp, order);
                        if (!page)
                                continue;
                        if (!order)
                                break;
                        if (!PageCompound(page)) {
                                split_page(page, order);
                                break;
                        } else if (!split_huge_page(page)) {
                                break;
                        }
                        __free_pages(page, order);
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}
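
/*
 * Worked example for the order_mask handling above (informative only): with
 * an IOMMU whose pgsize_bitmap allows 4KB and 2MB mappings, the caller passes
 * order_mask = (SZ_4K | SZ_2M) >> PAGE_SHIFT == 0x201, i.e. orders 0 and 9.
 * For a remaining count of 1024 pages the loop first attempts one order-9
 * (2MB) allocation with __GFP_NORETRY, and only if that fails clears order 9
 * from the mask and fills the remainder with order-0 pages without
 * __GFP_NORETRY.
 */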

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
                dma_addr_t *handle)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle);
        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        *handle = DMA_ERROR_CODE;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *              given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *         or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                unsigned long attrs, int prot, dma_addr_t *handle,
                void (*flush_page)(struct device *, const void *, phys_addr_t))
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t dma_addr;
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

        *handle = DMA_ERROR_CODE;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
        if (!pages)
                return NULL;

        iova = __alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        size = iova_align(iovad, size);
        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(prot & IOMMU_CACHE)) {
                struct sg_mapping_iter miter;
                /*
                 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
                 * sufficient here, so skip it by using the "wrong" direction.
                 */
                sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
                while (sg_miter_next(&miter))
                        flush_page(dev, miter.addr, page_to_phys(miter.page));
                sg_miter_stop(&miter);
        }

        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map_sg(domain, dma_addr, sgt.sgl, sgt.orig_nents, prot)
                        < size)
                goto out_free_sg;

        *handle = dma_addr;
        sg_free_table(&sgt);
        return pages;

out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        __free_iova(iovad, iova);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}
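
/*
 * Illustrative sketch (assumption): the rough shape of an arch dma_map_ops
 * .alloc/.free pair built on the two helpers above for a non-coherent
 * device. "arch_flush_page" and the remapping step are placeholders, not
 * APIs defined by this file.
 *
 *      int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 *      struct page **pages;
 *
 *      pages = iommu_dma_alloc(dev, size, gfp, attrs, ioprot, handle,
 *                              arch_flush_page);
 *      if (!pages)
 *              return NULL;
 *      // ...remap 'pages' into a contiguous kernel VA for the caller...
 *
 *      // and on the release path, after tearing down that remapping:
 *      iommu_dma_free(dev, pages, size, handle);
 */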

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */
int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
        unsigned long uaddr = vma->vm_start;
        unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int ret = -ENXIO;

        for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
                ret = vm_insert_page(vma, uaddr, pages[i]);
                if (ret)
                        break;
                uaddr += PAGE_SIZE;
        }
        return ret;
}
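
/*
 * Illustrative sketch (assumption): a dma_map_ops .mmap implementation would
 * typically validate the VMA against the buffer before handing off here.
 * 'area' and how the page array is recovered are placeholders.
 *
 *      unsigned long nr_vma_pages = vma_pages(vma);
 *      unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
 *
 *      if (vma->vm_pgoff >= nr_pages ||
 *          nr_vma_pages > nr_pages - vma->vm_pgoff)
 *              return -ENXIO;
 *
 *      return iommu_dma_mmap(area->pages, size, vma);
 */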

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot)
{
        dma_addr_t dma_addr;
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        size_t iova_off = iova_offset(iovad, phys);
        size_t len = iova_align(iovad, size + iova_off);
        struct iova *iova = __alloc_iova(domain, len, dma_get_mask(dev), dev);

        if (!iova)
                return DMA_ERROR_CODE;

        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map(domain, dma_addr, phys - iova_off, len, prot)) {
                __free_iova(iovad, iova);
                return DMA_ERROR_CODE;
        }
        return dma_addr + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, int prot)
{
        return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = DMA_ERROR_CODE;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (cur_len + s_length <= max_len)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}
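
/*
 * Worked example for the merging rule above (informative only): two original
 * 4KB segments that end up page-aligned and IOVA-contiguous are handed back
 * to the caller as a single 8KB DMA segment, provided 8KB stays within
 * dma_get_max_seg_size() and the second segment's IOVA does not sit exactly
 * on a boundary multiple (dma_addr & seg_mask must be non-zero for it to be
 * appended rather than starting a new output segment).
 */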

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != DMA_ERROR_CODE)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = DMA_ERROR_CODE;
                sg_dma_len(s) = 0;
        }
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        struct scatterlist *s, *prev = NULL;
        dma_addr_t dma_addr;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = __alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        dma_addr = iova_dma_addr(iovad, iova);
        if (iommu_map_sg(domain, dma_addr, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, dma_addr);

out_free_iova:
        __free_iova(iovad, iova);
out_restore_sg:
        __invalidate_sg(sg, nents);
        return 0;
}
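
/*
 * Illustrative sketch (assumption): an arch .map_sg callback layered on the
 * helper above, deriving the IOMMU protection flags from the DMA direction
 * before handing over. Any cache maintenance needed for a non-coherent
 * device would be done separately and is omitted; the "foo_" name and the
 * coherency query are placeholders for whatever the arch provides.
 *
 *      static int foo_map_sg(struct device *dev, struct scatterlist *sgl,
 *                            int nelems, enum dma_data_direction dir,
 *                            unsigned long attrs)
 *      {
 *              bool coherent = foo_device_is_coherent(dev);
 *              int prot = dma_info_to_prot(dir, coherent, attrs);
 *
 *              return iommu_dma_map_sg(dev, sgl, nelems, prot);
 *      }
 */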

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), sg_dma_address(sg));
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return __iommu_dma_map(dev, phys, size,
                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == DMA_ERROR_CODE;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        struct iova_domain *iovad = cookie_iovad(domain);
        struct iova *iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

        msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;

        msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
        if (!msi_page)
                return NULL;

        msi_page->phys = msi_addr;
        if (iovad) {
                iova = __alloc_iova(domain, size, dma_get_mask(dev), dev);
                if (!iova)
                        goto out_free_page;
                msi_page->iova = iova_dma_addr(iovad, iova);
        } else {
                msi_page->iova = cookie->msi_iova;
                cookie->msi_iova += size;
        }

        if (iommu_map(domain, msi_page->iova, msi_addr, size, prot))
                goto out_free_iova;

        INIT_LIST_HEAD(&msi_page->list);
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_iova:
        if (iovad)
                __free_iova(iovad, iova);
        else
                cookie->msi_iova -= size;
out_free_page:
        kfree(msi_page);
        return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie;
        struct iommu_dma_msi_page *msi_page;
        phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
        unsigned long flags;

        if (!domain || !domain->iova_cookie)
                return;

        cookie = domain->iova_cookie;

        /*
         * We disable IRQs to rule out a possible inversion against
         * irq_desc_lock if, say, someone tries to retarget the affinity
         * of an MSI from within an IPI handler.
         */
        spin_lock_irqsave(&cookie->msi_lock, flags);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        spin_unlock_irqrestore(&cookie->msi_lock, flags);

        if (WARN_ON(!msi_page)) {
                /*
                 * We're called from a void callback, so the best we can do is
                 * 'fail' by filling the message with obviously bogus values.
                 * Since we got this far due to an IOMMU being present, it's
                 * not like the existing address would have worked anyway...
                 */
                msg->address_hi = ~0U;
                msg->address_lo = ~0U;
                msg->data = ~0U;
        } else {
                msg->address_hi = upper_32_bits(msi_page->iova);
                msg->address_lo &= cookie_msi_granule(cookie) - 1;
                msg->address_lo += lower_32_bits(msi_page->iova);
        }
}
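
/*
 * Worked example for the address rewrite above (informative only): assuming
 * a 4KB MSI granule, a doorbell at physical address 0x2f020040 whose page is
 * remapped at IOVA 0xff000000 leaves the message with address_hi = 0x0 and
 * address_lo = 0xff000040. The offset within the granule (0x040) is
 * preserved while the page portion is replaced by the IOVA.
 */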