/*
 * A fairly generic DMA-API to IOMMU-API glue layer.
 *
 * Copyright (C) 2014-2015 ARM Ltd.
 *
 * based in part on arch/arm/mm/dma-mapping.c:
 * Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/device.h>
#include <linux/dma-iommu.h>
#include <linux/gfp.h>
#include <linux/huge_mm.h>
#include <linux/iommu.h>
#include <linux/iova.h>
#include <linux/irq.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

#define IOMMU_MAPPING_ERROR     0

struct iommu_dma_msi_page {
        struct list_head        list;
        dma_addr_t              iova;
        phys_addr_t             phys;
};

enum iommu_dma_cookie_type {
        IOMMU_DMA_IOVA_COOKIE,
        IOMMU_DMA_MSI_COOKIE,
};

struct iommu_dma_cookie {
        enum iommu_dma_cookie_type      type;
        union {
                /* Full allocator for IOMMU_DMA_IOVA_COOKIE */
                struct iova_domain      iovad;
                /* Trivial linear page allocator for IOMMU_DMA_MSI_COOKIE */
                dma_addr_t              msi_iova;
        };
        struct list_head                msi_page_list;
        spinlock_t                      msi_lock;
};

static inline size_t cookie_msi_granule(struct iommu_dma_cookie *cookie)
{
        if (cookie->type == IOMMU_DMA_IOVA_COOKIE)
                return cookie->iovad.granule;
        return PAGE_SIZE;
}

static struct iommu_dma_cookie *cookie_alloc(enum iommu_dma_cookie_type type)
{
        struct iommu_dma_cookie *cookie;

        cookie = kzalloc(sizeof(*cookie), GFP_KERNEL);
        if (cookie) {
                spin_lock_init(&cookie->msi_lock);
                INIT_LIST_HEAD(&cookie->msi_page_list);
                cookie->type = type;
        }
        return cookie;
}

int iommu_dma_init(void)
{
        return iova_cache_get();
}

/**
 * iommu_get_dma_cookie - Acquire DMA-API resources for a domain
 * @domain: IOMMU domain to prepare for DMA-API usage
 *
 * IOMMU drivers should normally call this from their domain_alloc
 * callback when domain->type == IOMMU_DOMAIN_DMA.
 */
int iommu_get_dma_cookie(struct iommu_domain *domain)
{
        if (domain->iova_cookie)
                return -EEXIST;

        domain->iova_cookie = cookie_alloc(IOMMU_DMA_IOVA_COOKIE);
        if (!domain->iova_cookie)
                return -ENOMEM;

        return 0;
}
EXPORT_SYMBOL(iommu_get_dma_cookie);
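
/*
 * Usage sketch (illustrative only, not part of this file): how an IOMMU
 * driver's domain_alloc/domain_free callbacks would typically pair
 * iommu_get_dma_cookie() with iommu_put_dma_cookie(). The my_domain_alloc,
 * my_priv and to_my_priv names are hypothetical placeholders.
 *
 *      static struct iommu_domain *my_domain_alloc(unsigned type)
 *      {
 *              struct my_priv *priv;
 *
 *              if (type != IOMMU_DOMAIN_UNMANAGED && type != IOMMU_DOMAIN_DMA)
 *                      return NULL;
 *
 *              priv = kzalloc(sizeof(*priv), GFP_KERNEL);
 *              if (!priv)
 *                      return NULL;
 *
 *              if (type == IOMMU_DOMAIN_DMA &&
 *                  iommu_get_dma_cookie(&priv->domain)) {
 *                      kfree(priv);
 *                      return NULL;
 *              }
 *              return &priv->domain;
 *      }
 *
 *      static void my_domain_free(struct iommu_domain *domain)
 *      {
 *              iommu_put_dma_cookie(domain);
 *              kfree(to_my_priv(domain));
 *      }
 */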

/**
 * iommu_get_msi_cookie - Acquire just MSI remapping resources
 * @domain: IOMMU domain to prepare
 * @base: Start address of IOVA region for MSI mappings
 *
 * Users who manage their own IOVA allocation and do not want DMA API support,
 * but would still like to take advantage of automatic MSI remapping, can use
 * this to initialise their own domain appropriately. Users should reserve a
 * contiguous IOVA region, starting at @base, large enough to accommodate the
 * number of PAGE_SIZE mappings necessary to cover every MSI doorbell address
 * used by the devices attached to @domain.
 */
int iommu_get_msi_cookie(struct iommu_domain *domain, dma_addr_t base)
{
        struct iommu_dma_cookie *cookie;

        if (domain->type != IOMMU_DOMAIN_UNMANAGED)
                return -EINVAL;

        if (domain->iova_cookie)
                return -EEXIST;

        cookie = cookie_alloc(IOMMU_DMA_MSI_COOKIE);
        if (!cookie)
                return -ENOMEM;

        cookie->msi_iova = base;
        domain->iova_cookie = cookie;
        return 0;
}
EXPORT_SYMBOL(iommu_get_msi_cookie);
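
/*
 * Usage sketch (illustrative only, not from this file): a caller managing an
 * unmanaged domain itself (e.g. a VFIO-style user) reserving a window for MSI
 * doorbell mappings. MY_MSI_IOVA_BASE is a hypothetical base address chosen
 * by the caller from IOVA space it does not otherwise allocate from; mappings
 * are then handed out linearly upwards from that base in PAGE_SIZE granules.
 *
 *      domain = iommu_domain_alloc(dev->bus);
 *      if (!domain)
 *              return -ENOMEM;
 *
 *      ret = iommu_get_msi_cookie(domain, MY_MSI_IOVA_BASE);
 *      if (ret) {
 *              iommu_domain_free(domain);
 *              return ret;
 *      }
 */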

/**
 * iommu_put_dma_cookie - Release a domain's DMA mapping resources
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie() or
 *          iommu_get_msi_cookie()
 *
 * IOMMU drivers should normally call this from their domain_free callback.
 */
void iommu_put_dma_cookie(struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi, *tmp;

        if (!cookie)
                return;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE && cookie->iovad.granule)
                put_iova_domain(&cookie->iovad);

        list_for_each_entry_safe(msi, tmp, &cookie->msi_page_list, list) {
                list_del(&msi->list);
                kfree(msi);
        }
        kfree(cookie);
        domain->iova_cookie = NULL;
}
EXPORT_SYMBOL(iommu_put_dma_cookie);

/**
 * iommu_dma_get_resv_regions - Reserved region driver helper
 * @dev: Device from iommu_get_resv_regions()
 * @list: Reserved region list from iommu_get_resv_regions()
 *
 * IOMMU drivers can use this to implement their .get_resv_regions callback
 * for general non-IOMMU-specific reservations. Currently, this covers host
 * bridge windows for PCI devices.
 */
void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
{
        struct pci_host_bridge *bridge;
        struct resource_entry *window;

        if (!dev_is_pci(dev))
                return;

        bridge = pci_find_host_bridge(to_pci_dev(dev)->bus);
        resource_list_for_each_entry(window, &bridge->windows) {
                struct iommu_resv_region *region;
                phys_addr_t start;
                size_t length;

                if (resource_type(window->res) != IORESOURCE_MEM)
                        continue;

                start = window->res->start - window->offset;
                length = window->res->end - window->res->start + 1;
                region = iommu_alloc_resv_region(start, length, 0,
                                IOMMU_RESV_RESERVED);
                if (!region)
                        return;

                list_add_tail(&region->list, list);
        }
}
EXPORT_SYMBOL(iommu_dma_get_resv_regions);
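
/*
 * Usage sketch (illustrative only, not part of this file): an IOMMU driver
 * combining its own hardware-specific regions with the generic helper above
 * in its .get_resv_regions callback. The MY_* constants are hypothetical.
 *
 *      static void my_iommu_get_resv_regions(struct device *dev,
 *                                            struct list_head *head)
 *      {
 *              struct iommu_resv_region *region;
 *              int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
 *
 *              region = iommu_alloc_resv_region(MY_MSI_DOORBELL_BASE,
 *                                               MY_MSI_DOORBELL_SIZE,
 *                                               prot, IOMMU_RESV_SW_MSI);
 *              if (region)
 *                      list_add_tail(&region->list, head);
 *
 *              iommu_dma_get_resv_regions(dev, head);
 *      }
 */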

static int cookie_init_hw_msi_region(struct iommu_dma_cookie *cookie,
                phys_addr_t start, phys_addr_t end)
{
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_dma_msi_page *msi_page;
        int i, num_pages;

        start -= iova_offset(iovad, start);
        num_pages = iova_align(iovad, end - start) >> iova_shift(iovad);

        msi_page = kcalloc(num_pages, sizeof(*msi_page), GFP_KERNEL);
        if (!msi_page)
                return -ENOMEM;

        for (i = 0; i < num_pages; i++) {
                msi_page[i].phys = start;
                msi_page[i].iova = start;
                INIT_LIST_HEAD(&msi_page[i].list);
                list_add(&msi_page[i].list, &cookie->msi_page_list);
                start += iovad->granule;
        }

        return 0;
}

static int iova_reserve_iommu_regions(struct device *dev,
                struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct iommu_resv_region *region;
        LIST_HEAD(resv_regions);
        int ret = 0;

        iommu_get_resv_regions(dev, &resv_regions);
        list_for_each_entry(region, &resv_regions, list) {
                unsigned long lo, hi;

                /* We ARE the software that manages these! */
                if (region->type == IOMMU_RESV_SW_MSI)
                        continue;

                lo = iova_pfn(iovad, region->start);
                hi = iova_pfn(iovad, region->start + region->length - 1);
                reserve_iova(iovad, lo, hi);

                if (region->type == IOMMU_RESV_MSI)
                        ret = cookie_init_hw_msi_region(cookie, region->start,
                                        region->start + region->length);
                if (ret)
                        break;
        }
        iommu_put_resv_regions(dev, &resv_regions);

        return ret;
}

/**
 * iommu_dma_init_domain - Initialise a DMA mapping domain
 * @domain: IOMMU domain previously prepared by iommu_get_dma_cookie()
 * @base: IOVA at which the mappable address space starts
 * @size: Size of IOVA space
 * @dev: Device the domain is being initialised for
 *
 * @base and @size should be exact multiples of IOMMU page granularity to
 * avoid rounding surprises. If necessary, we reserve the page at address 0
 * to ensure it is an invalid IOVA. It is safe to reinitialise a domain, but
 * any change which could make prior IOVAs invalid will fail.
 */
int iommu_dma_init_domain(struct iommu_domain *domain, dma_addr_t base,
                u64 size, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long order, base_pfn, end_pfn;

        if (!cookie || cookie->type != IOMMU_DMA_IOVA_COOKIE)
                return -EINVAL;

        /* Use the smallest supported page size for IOVA granularity */
        order = __ffs(domain->pgsize_bitmap);
        base_pfn = max_t(unsigned long, 1, base >> order);
        end_pfn = (base + size - 1) >> order;

        /* Check the domain allows at least some access to the device... */
        if (domain->geometry.force_aperture) {
                if (base > domain->geometry.aperture_end ||
                    base + size <= domain->geometry.aperture_start) {
                        pr_warn("specified DMA range outside IOMMU capability\n");
                        return -EFAULT;
                }
                /* ...then finally give it a kicking to make sure it fits */
                base_pfn = max_t(unsigned long, base_pfn,
                                domain->geometry.aperture_start >> order);
                end_pfn = min_t(unsigned long, end_pfn,
                                domain->geometry.aperture_end >> order);
        }
        /*
         * PCI devices may have larger DMA masks, but still prefer allocating
         * within a 32-bit mask to avoid DAC addressing. Such limitations don't
         * apply to the typical platform device, so for those we may as well
         * leave the cache limit at the top of their range to save an rb_last()
         * traversal on every allocation.
         */
        if (dev && dev_is_pci(dev))
                end_pfn &= DMA_BIT_MASK(32) >> order;

        /* start_pfn is always nonzero for an already-initialised domain */
        if (iovad->start_pfn) {
                if (1UL << order != iovad->granule ||
                    base_pfn != iovad->start_pfn) {
                        pr_warn("Incompatible range for DMA domain\n");
                        return -EFAULT;
                }
                /*
                 * If we have devices with different DMA masks, move the free
                 * area cache limit down for the benefit of the smaller one.
                 */
                iovad->dma_32bit_pfn = min(end_pfn + 1, iovad->dma_32bit_pfn);

                return 0;
        }

        init_iova_domain(iovad, 1UL << order, base_pfn, end_pfn);
        if (!dev)
                return 0;

        return iova_reserve_iommu_regions(dev, domain);
}
EXPORT_SYMBOL(iommu_dma_init_domain);
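
/*
 * Usage sketch (illustrative only, loosely modelled on the arm64 glue of this
 * era): how architecture code typically initialises the domain for a device
 * and switches it over to the IOMMU-backed DMA ops. my_setup_dma_ops and
 * my_iommu_dma_ops are hypothetical names, and error handling is trimmed.
 *
 *      static bool my_setup_dma_ops(struct device *dev, u64 dma_base, u64 size)
 *      {
 *              struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
 *
 *              if (!domain || domain->type != IOMMU_DOMAIN_DMA)
 *                      return false;
 *
 *              if (iommu_dma_init_domain(domain, dma_base, size, dev))
 *                      return false;
 *
 *              dev->dma_ops = &my_iommu_dma_ops;
 *              return true;
 *      }
 */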

/**
 * dma_info_to_prot - Translate DMA API directions and attributes to IOMMU API
 *                    page flags.
 * @dir: Direction of DMA transfer
 * @coherent: Is the DMA master cache-coherent?
 * @attrs: DMA attributes for the mapping
 *
 * Return: corresponding IOMMU API page protection flags
 */
int dma_info_to_prot(enum dma_data_direction dir, bool coherent,
                     unsigned long attrs)
{
        int prot = coherent ? IOMMU_CACHE : 0;

        if (attrs & DMA_ATTR_PRIVILEGED)
                prot |= IOMMU_PRIV;

        switch (dir) {
        case DMA_BIDIRECTIONAL:
                return prot | IOMMU_READ | IOMMU_WRITE;
        case DMA_TO_DEVICE:
                return prot | IOMMU_READ;
        case DMA_FROM_DEVICE:
                return prot | IOMMU_WRITE;
        default:
                return 0;
        }
}
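
/*
 * Usage sketch (illustrative only, not part of this file): a dma_map_ops
 * ->map_page implementation composing the translation above with
 * iommu_dma_map_page(). my_dev_is_coherent() is a hypothetical stand-in for
 * the arch-specific coherency query. For a coherent device doing
 * DMA_TO_DEVICE this yields IOMMU_CACHE | IOMMU_READ.
 *
 *      static dma_addr_t my_map_page(struct device *dev, struct page *page,
 *                                    unsigned long offset, size_t size,
 *                                    enum dma_data_direction dir,
 *                                    unsigned long attrs)
 *      {
 *              bool coherent = my_dev_is_coherent(dev);
 *              int prot = dma_info_to_prot(dir, coherent, attrs);
 *
 *              return iommu_dma_map_page(dev, page, offset, size, prot);
 *      }
 */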

static dma_addr_t iommu_dma_alloc_iova(struct iommu_domain *domain,
                size_t size, dma_addr_t dma_limit, struct device *dev)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        unsigned long shift, iova_len, iova = 0;

        if (cookie->type == IOMMU_DMA_MSI_COOKIE) {
                cookie->msi_iova += size;
                return cookie->msi_iova - size;
        }

        shift = iova_shift(iovad);
        iova_len = size >> shift;
        /*
         * Freeing non-power-of-two-sized allocations back into the IOVA caches
         * will come back to bite us badly, so we have to waste a bit of space
         * rounding up anything cacheable to make sure that can't happen. The
         * order of the unadjusted size will still match upon freeing.
         */
        if (iova_len < (1 << (IOVA_RANGE_CACHE_MAX_SIZE - 1)))
                iova_len = roundup_pow_of_two(iova_len);

        if (domain->geometry.force_aperture)
                dma_limit = min(dma_limit, domain->geometry.aperture_end);

        /* Try to get PCI devices a SAC address */
        if (dma_limit > DMA_BIT_MASK(32) && dev_is_pci(dev))
                iova = alloc_iova_fast(iovad, iova_len, DMA_BIT_MASK(32) >> shift);

        if (!iova)
                iova = alloc_iova_fast(iovad, iova_len, dma_limit >> shift);

        return (dma_addr_t)iova << shift;
}

static void iommu_dma_free_iova(struct iommu_dma_cookie *cookie,
                dma_addr_t iova, size_t size)
{
        struct iova_domain *iovad = &cookie->iovad;

        /* The MSI case is only ever cleaning up its most recent allocation */
        if (cookie->type == IOMMU_DMA_MSI_COOKIE)
                cookie->msi_iova -= size;
        else
                free_iova_fast(iovad, iova_pfn(iovad, iova),
                                size >> iova_shift(iovad));
}

static void __iommu_dma_unmap(struct iommu_domain *domain, dma_addr_t dma_addr,
                size_t size)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        size_t iova_off = iova_offset(iovad, dma_addr);

        dma_addr -= iova_off;
        size = iova_align(iovad, size + iova_off);

        WARN_ON(iommu_unmap(domain, dma_addr, size) != size);
        iommu_dma_free_iova(cookie, dma_addr, size);
}

static void __iommu_dma_free_pages(struct page **pages, int count)
{
        while (count--)
                __free_page(pages[count]);
        kvfree(pages);
}

static struct page **__iommu_dma_alloc_pages(unsigned int count,
                unsigned long order_mask, gfp_t gfp)
{
        struct page **pages;
        unsigned int i = 0, array_size = count * sizeof(*pages);

        order_mask &= (2U << MAX_ORDER) - 1;
        if (!order_mask)
                return NULL;

        if (array_size <= PAGE_SIZE)
                pages = kzalloc(array_size, GFP_KERNEL);
        else
                pages = vzalloc(array_size);
        if (!pages)
                return NULL;

        /* IOMMU can map any pages, so highmem can also be used here */
        gfp |= __GFP_NOWARN | __GFP_HIGHMEM;

        while (count) {
                struct page *page = NULL;
                unsigned int order_size;

                /*
                 * Higher-order allocations are a convenience rather
                 * than a necessity, hence using __GFP_NORETRY until
                 * falling back to minimum-order allocations.
                 */
                for (order_mask &= (2U << __fls(count)) - 1;
                     order_mask; order_mask &= ~order_size) {
                        unsigned int order = __fls(order_mask);

                        order_size = 1U << order;
                        page = alloc_pages((order_mask - order_size) ?
                                           gfp | __GFP_NORETRY : gfp, order);
                        if (!page)
                                continue;
                        if (!order)
                                break;
                        if (!PageCompound(page)) {
                                split_page(page, order);
                                break;
                        } else if (!split_huge_page(page)) {
                                break;
                        }
                        __free_pages(page, order);
                }
                if (!page) {
                        __iommu_dma_free_pages(pages, i);
                        return NULL;
                }
                count -= order_size;
                while (order_size--)
                        pages[i++] = page++;
        }
        return pages;
}

/**
 * iommu_dma_free - Free a buffer allocated by iommu_dma_alloc()
 * @dev: Device which owns this buffer
 * @pages: Array of buffer pages as returned by iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @handle: DMA address of buffer
 *
 * Frees both the pages associated with the buffer, and the array
 * describing them
 */
void iommu_dma_free(struct device *dev, struct page **pages, size_t size,
                dma_addr_t *handle)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), *handle, size);
        __iommu_dma_free_pages(pages, PAGE_ALIGN(size) >> PAGE_SHIFT);
        *handle = IOMMU_MAPPING_ERROR;
}

/**
 * iommu_dma_alloc - Allocate and map a buffer contiguous in IOVA space
 * @dev: Device to allocate memory for. Must be a real device
 *       attached to an iommu_dma_domain
 * @size: Size of buffer in bytes
 * @gfp: Allocation flags
 * @attrs: DMA attributes for this allocation
 * @prot: IOMMU mapping flags
 * @handle: Out argument for allocated DMA handle
 * @flush_page: Arch callback which must ensure PAGE_SIZE bytes from the
 *              given VA/PA are visible to the given non-coherent device.
 *
 * If @size is less than PAGE_SIZE, then a full CPU page will be allocated,
 * but an IOMMU which supports smaller pages might not map the whole thing.
 *
 * Return: Array of struct page pointers describing the buffer,
 *         or NULL on failure.
 */
struct page **iommu_dma_alloc(struct device *dev, size_t size, gfp_t gfp,
                unsigned long attrs, int prot, dma_addr_t *handle,
                void (*flush_page)(struct device *, const void *, phys_addr_t))
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct page **pages;
        struct sg_table sgt;
        dma_addr_t iova;
        unsigned int count, min_size, alloc_sizes = domain->pgsize_bitmap;

        *handle = IOMMU_MAPPING_ERROR;

        min_size = alloc_sizes & -alloc_sizes;
        if (min_size < PAGE_SIZE) {
                min_size = PAGE_SIZE;
                alloc_sizes |= PAGE_SIZE;
        } else {
                size = ALIGN(size, min_size);
        }
        if (attrs & DMA_ATTR_ALLOC_SINGLE_PAGES)
                alloc_sizes = min_size;

        count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        pages = __iommu_dma_alloc_pages(count, alloc_sizes >> PAGE_SHIFT, gfp);
        if (!pages)
                return NULL;

        size = iova_align(iovad, size);
        iova = iommu_dma_alloc_iova(domain, size, dev->coherent_dma_mask, dev);
        if (!iova)
                goto out_free_pages;

        if (sg_alloc_table_from_pages(&sgt, pages, count, 0, size, GFP_KERNEL))
                goto out_free_iova;

        if (!(prot & IOMMU_CACHE)) {
                struct sg_mapping_iter miter;
                /*
                 * The CPU-centric flushing implied by SG_MITER_TO_SG isn't
                 * sufficient here, so skip it by using the "wrong" direction.
                 */
                sg_miter_start(&miter, sgt.sgl, sgt.orig_nents, SG_MITER_FROM_SG);
                while (sg_miter_next(&miter))
                        flush_page(dev, miter.addr, page_to_phys(miter.page));
                sg_miter_stop(&miter);
        }

        if (iommu_map_sg(domain, iova, sgt.sgl, sgt.orig_nents, prot)
                        < size)
                goto out_free_sg;

        *handle = iova;
        sg_free_table(&sgt);
        return pages;

out_free_sg:
        sg_free_table(&sgt);
out_free_iova:
        iommu_dma_free_iova(cookie, iova, size);
out_free_pages:
        __iommu_dma_free_pages(pages, count);
        return NULL;
}
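
/*
 * Usage sketch (illustrative only, loosely modelled on the arm64 glue of this
 * era): the shape of an arch ->alloc hook built on iommu_dma_alloc().
 * my_dev_is_coherent(), my_flush_page() and my_vmap_pages() are hypothetical
 * placeholders for the arch cache maintenance and CPU remapping details.
 *
 *      static void *my_dma_alloc(struct device *dev, size_t size,
 *                                dma_addr_t *handle, gfp_t gfp,
 *                                unsigned long attrs)
 *      {
 *              bool coherent = my_dev_is_coherent(dev);
 *              int prot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
 *              struct page **pages;
 *
 *              pages = iommu_dma_alloc(dev, size, gfp, attrs, prot, handle,
 *                                      my_flush_page);
 *              if (!pages)
 *                      return NULL;
 *
 *              return my_vmap_pages(pages, size);
 *      }
 */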

/**
 * iommu_dma_mmap - Map a buffer into provided user VMA
 * @pages: Array representing buffer from iommu_dma_alloc()
 * @size: Size of buffer in bytes
 * @vma: VMA describing requested userspace mapping
 *
 * Maps the pages of the buffer in @pages into @vma. The caller is responsible
 * for verifying the correct size and protection of @vma beforehand.
 */

int iommu_dma_mmap(struct page **pages, size_t size, struct vm_area_struct *vma)
{
        unsigned long uaddr = vma->vm_start;
        unsigned int i, count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        int ret = -ENXIO;

        for (i = vma->vm_pgoff; i < count && uaddr < vma->vm_end; i++) {
                ret = vm_insert_page(vma, uaddr, pages[i]);
                if (ret)
                        break;
                uaddr += PAGE_SIZE;
        }
        return ret;
}

static dma_addr_t __iommu_dma_map(struct device *dev, phys_addr_t phys,
                size_t size, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        size_t iova_off = 0;
        dma_addr_t iova;

        if (cookie->type == IOMMU_DMA_IOVA_COOKIE) {
                iova_off = iova_offset(&cookie->iovad, phys);
                size = iova_align(&cookie->iovad, size + iova_off);
        }

        iova = iommu_dma_alloc_iova(domain, size, dma_get_mask(dev), dev);
        if (!iova)
                return IOMMU_MAPPING_ERROR;

        if (iommu_map(domain, iova, phys - iova_off, size, prot)) {
                iommu_dma_free_iova(cookie, iova, size);
                return IOMMU_MAPPING_ERROR;
        }
        return iova + iova_off;
}

dma_addr_t iommu_dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, int prot)
{
        return __iommu_dma_map(dev, page_to_phys(page) + offset, size, prot);
}

void iommu_dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
                enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

/*
 * Prepare a successfully-mapped scatterlist to give back to the caller.
 *
 * At this point the segments are already laid out by iommu_dma_map_sg() to
 * avoid individually crossing any boundaries, so we merely need to check a
 * segment's start address to avoid concatenating across one.
 */
static int __finalise_sg(struct device *dev, struct scatterlist *sg, int nents,
                dma_addr_t dma_addr)
{
        struct scatterlist *s, *cur = sg;
        unsigned long seg_mask = dma_get_seg_boundary(dev);
        unsigned int cur_len = 0, max_len = dma_get_max_seg_size(dev);
        int i, count = 0;

        for_each_sg(sg, s, nents, i) {
                /* Restore this segment's original unaligned fields first */
                unsigned int s_iova_off = sg_dma_address(s);
                unsigned int s_length = sg_dma_len(s);
                unsigned int s_iova_len = s->length;

                s->offset += s_iova_off;
                s->length = s_length;
                sg_dma_address(s) = IOMMU_MAPPING_ERROR;
                sg_dma_len(s) = 0;

                /*
                 * Now fill in the real DMA data. If...
                 * - there is a valid output segment to append to
                 * - and this segment starts on an IOVA page boundary
                 * - but doesn't fall at a segment boundary
                 * - and wouldn't make the resulting output segment too long
                 */
                if (cur_len && !s_iova_off && (dma_addr & seg_mask) &&
                    (cur_len + s_length <= max_len)) {
                        /* ...then concatenate it with the previous one */
                        cur_len += s_length;
                } else {
                        /* Otherwise start the next output segment */
                        if (i > 0)
                                cur = sg_next(cur);
                        cur_len = s_length;
                        count++;

                        sg_dma_address(cur) = dma_addr + s_iova_off;
                }

                sg_dma_len(cur) = cur_len;
                dma_addr += s_iova_len;

                if (s_length + s_iova_off < s_iova_len)
                        cur_len = 0;
        }
        return count;
}

/*
 * If mapping failed, then just restore the original list,
 * but making sure the DMA fields are invalidated.
 */
static void __invalidate_sg(struct scatterlist *sg, int nents)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (sg_dma_address(s) != IOMMU_MAPPING_ERROR)
                        s->offset += sg_dma_address(s);
                if (sg_dma_len(s))
                        s->length = sg_dma_len(s);
                sg_dma_address(s) = IOMMU_MAPPING_ERROR;
                sg_dma_len(s) = 0;
        }
}

/*
 * The DMA API client is passing in a scatterlist which could describe
 * any old buffer layout, but the IOMMU API requires everything to be
 * aligned to IOMMU pages. Hence the need for this complicated bit of
 * impedance-matching, to be able to hand off a suitably-aligned list,
 * but still preserve the original offsets and sizes for the caller.
 */
int iommu_dma_map_sg(struct device *dev, struct scatterlist *sg,
                int nents, int prot)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iova_domain *iovad = &cookie->iovad;
        struct scatterlist *s, *prev = NULL;
        dma_addr_t iova;
        size_t iova_len = 0;
        unsigned long mask = dma_get_seg_boundary(dev);
        int i;

        /*
         * Work out how much IOVA space we need, and align the segments to
         * IOVA granules for the IOMMU driver to handle. With some clever
         * trickery we can modify the list in-place, but reversibly, by
         * stashing the unaligned parts in the as-yet-unused DMA fields.
         */
        for_each_sg(sg, s, nents, i) {
                size_t s_iova_off = iova_offset(iovad, s->offset);
                size_t s_length = s->length;
                size_t pad_len = (mask - iova_len + 1) & mask;

                sg_dma_address(s) = s_iova_off;
                sg_dma_len(s) = s_length;
                s->offset -= s_iova_off;
                s_length = iova_align(iovad, s_length + s_iova_off);
                s->length = s_length;

                /*
                 * Due to the alignment of our single IOVA allocation, we can
                 * depend on these assumptions about the segment boundary mask:
                 * - If mask size >= IOVA size, then the IOVA range cannot
                 *   possibly fall across a boundary, so we don't care.
                 * - If mask size < IOVA size, then the IOVA range must start
                 *   exactly on a boundary, therefore we can lay things out
                 *   based purely on segment lengths without needing to know
                 *   the actual addresses beforehand.
                 * - The mask must be a power of 2, so pad_len == 0 if
                 *   iova_len == 0, thus we cannot dereference prev the first
                 *   time through here (i.e. before it has a meaningful value).
                 */
                if (pad_len && pad_len < s_length - 1) {
                        prev->length += pad_len;
                        iova_len += pad_len;
                }

                iova_len += s_length;
                prev = s;
        }

        iova = iommu_dma_alloc_iova(domain, iova_len, dma_get_mask(dev), dev);
        if (!iova)
                goto out_restore_sg;

        /*
         * We'll leave any physical concatenation to the IOMMU driver's
         * implementation - it knows better than we do.
         */
        if (iommu_map_sg(domain, iova, sg, nents, prot) < iova_len)
                goto out_free_iova;

        return __finalise_sg(dev, sg, nents, iova);

out_free_iova:
        iommu_dma_free_iova(cookie, iova, iova_len);
out_restore_sg:
        __invalidate_sg(sg, nents);
        return 0;
}
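
/*
 * Usage sketch (illustrative only, not part of this file): what a DMA API
 * client sees after the impedance-matching above. nents segments go in, and
 * dma_map_sg() may return fewer, already-concatenated DMA segments, which
 * must be walked only up to the returned count. my_hw_queue_segment() is a
 * hypothetical driver helper.
 *
 *      int count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *      struct scatterlist *s;
 *      int i;
 *
 *      if (!count)
 *              return -ENOMEM;
 *
 *      for_each_sg(sgl, s, count, i)
 *              my_hw_queue_segment(sg_dma_address(s), sg_dma_len(s));
 */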

void iommu_dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir, unsigned long attrs)
{
        dma_addr_t start, end;
        struct scatterlist *tmp;
        int i;
        /*
         * The scatterlist segments are mapped into a single
         * contiguous IOVA allocation, so this is incredibly easy.
         */
        start = sg_dma_address(sg);
        for_each_sg(sg_next(sg), tmp, nents - 1, i) {
                if (sg_dma_len(tmp) == 0)
                        break;
                sg = tmp;
        }
        end = sg_dma_address(sg) + sg_dma_len(sg);
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), start, end - start);
}

dma_addr_t iommu_dma_map_resource(struct device *dev, phys_addr_t phys,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        return __iommu_dma_map(dev, phys, size,
                        dma_info_to_prot(dir, false, attrs) | IOMMU_MMIO);
}

void iommu_dma_unmap_resource(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir, unsigned long attrs)
{
        __iommu_dma_unmap(iommu_get_domain_for_dev(dev), handle, size);
}

int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == IOMMU_MAPPING_ERROR;
}

static struct iommu_dma_msi_page *iommu_dma_get_msi_page(struct device *dev,
                phys_addr_t msi_addr, struct iommu_domain *domain)
{
        struct iommu_dma_cookie *cookie = domain->iova_cookie;
        struct iommu_dma_msi_page *msi_page;
        dma_addr_t iova;
        int prot = IOMMU_WRITE | IOMMU_NOEXEC | IOMMU_MMIO;
        size_t size = cookie_msi_granule(cookie);

        msi_addr &= ~(phys_addr_t)(size - 1);
        list_for_each_entry(msi_page, &cookie->msi_page_list, list)
                if (msi_page->phys == msi_addr)
                        return msi_page;

        msi_page = kzalloc(sizeof(*msi_page), GFP_ATOMIC);
        if (!msi_page)
                return NULL;

        iova = __iommu_dma_map(dev, msi_addr, size, prot);
        if (iommu_dma_mapping_error(dev, iova))
                goto out_free_page;

        INIT_LIST_HEAD(&msi_page->list);
        msi_page->phys = msi_addr;
        msi_page->iova = iova;
        list_add(&msi_page->list, &cookie->msi_page_list);
        return msi_page;

out_free_page:
        kfree(msi_page);
        return NULL;
}

void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
        struct device *dev = msi_desc_to_dev(irq_get_msi_desc(irq));
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);
        struct iommu_dma_cookie *cookie;
        struct iommu_dma_msi_page *msi_page;
        phys_addr_t msi_addr = (u64)msg->address_hi << 32 | msg->address_lo;
        unsigned long flags;

        if (!domain || !domain->iova_cookie)
                return;

        cookie = domain->iova_cookie;

        /*
         * We disable IRQs to rule out a possible inversion against
         * irq_desc_lock if, say, someone tries to retarget the affinity
         * of an MSI from within an IPI handler.
         */
        spin_lock_irqsave(&cookie->msi_lock, flags);
        msi_page = iommu_dma_get_msi_page(dev, msi_addr, domain);
        spin_unlock_irqrestore(&cookie->msi_lock, flags);

        if (WARN_ON(!msi_page)) {
                /*
                 * We're called from a void callback, so the best we can do is
                 * 'fail' by filling the message with obviously bogus values.
                 * Since we got this far due to an IOMMU being present, it's
                 * not like the existing address would have worked anyway...
                 */
                msg->address_hi = ~0U;
                msg->address_lo = ~0U;
                msg->data = ~0U;
        } else {
                msg->address_hi = upper_32_bits(msi_page->iova);
                msg->address_lo &= cookie_msi_granule(cookie) - 1;
                msg->address_lo += lower_32_bits(msi_page->iova);
        }
}