/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

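/*
 * Choose the kernel page protection for a DMA buffer: non-coherent devices
 * (and DMA_ATTR_WRITE_COMBINE requests) get a write-combine (Normal
 * non-cacheable) mapping, otherwise the passed-in protection is kept.
 */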
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

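/*
 * Allocate from the preallocated atomic pool; used when the caller cannot
 * sleep (e.g. GFP_ATOMIC) and a fresh non-cacheable remapping is therefore
 * not possible. Returns a zeroed buffer and sets *ret_page to its first page.
 */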
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

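/*
 * Allocate the backing memory for a coherent buffer: prefer CMA when a
 * contiguous area is available and the caller may block, otherwise fall back
 * to swiotlb_alloc_coherent(). GFP_DMA is added for devices limited to
 * 32-bit DMA addressing when a DMA zone is configured.
 */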
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

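/*
 * Top-level .alloc hook: non-coherent atomic allocations come from the
 * atomic pool; otherwise the buffer is allocated via __dma_alloc_coherent()
 * and, for non-coherent devices, flushed and remapped non-cacheable so the
 * CPU alias matches the device's view of memory.
 */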
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, __builtin_return_address(0));
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

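/*
 * Streaming DMA ops: swiotlb does the actual mapping (and any bounce
 * buffering); for non-coherent devices the CPU caches are cleaned or
 * invalidated around each transfer with __dma_map_area()/__dma_unmap_area().
 */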
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}


static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);

	return ret;
}

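/*
 * Only consult swiotlb about a device's DMA mask when the bounce buffer is
 * actually in use (see arm64_dma_init()); otherwise any mask is acceptable.
 */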
static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

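/*
 * Set up the atomic pool at boot: carve out atomic_pool_size bytes (from CMA
 * or GFP_DMA pages), flush them, and publish a Normal-NC remapping through a
 * genpool so __alloc_from_pool() can hand out coherent memory atomically.
 */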
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

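/*
 * Enable swiotlb bounce buffering only when it is actually needed: either it
 * was forced on the command line or some memory lies above the DMA-capable
 * physical limit. Then set up the atomic coherent pool.
 */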
static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);


#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

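/*
 * IOMMU-backed .alloc: blocking callers get a scatter-gathered buffer from
 * iommu_dma_alloc() remapped contiguously in vmalloc space; atomic callers
 * fall back to a single physically contiguous allocation (lowmem or the
 * atomic pool) mapped through the IOMMU as one page range.
 */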
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp)) {
		struct page **pages;
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

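/*
 * IOMMU streaming DMA ops: iommu_dma_map_page()/iommu_dma_map_sg() establish
 * the IOVA mappings, while the sync helpers above perform the CPU cache
 * maintenance for non-coherent devices unless DMA_ATTR_SKIP_CPU_SYNC is set.
 */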
static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_direction_to_prot(dir, coherent);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_direction_to_prot(dir, coherent));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.dma_supported = iommu_dma_supported,
	.mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
	struct list_head list;
	struct device *dev;
	const struct iommu_ops *ops;
	u64 dma_base;
	u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			    u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/*
	 * If the IOMMU driver has the DMA domain support that we require,
	 * then the IOMMU core will have already configured a group for this
	 * device, and allocated the default domain for that group.
	 */
	if (!domain || iommu_dma_init_domain(domain, dma_base, size, dev)) {
		pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
			dev_name(dev));
		return false;
	}

	dev->archdata.dma_ops = &iommu_dma_ops;
	return true;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			       u64 dma_base, u64 size)
{
	struct iommu_dma_notifier_data *iommudata;

	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
	if (!iommudata)
		return;

	iommudata->dev = dev;
	iommudata->ops = ops;
	iommudata->dma_base = dma_base;
	iommudata->size = size;

	mutex_lock(&iommu_dma_notifier_lock);
	list_add(&iommudata->list, &iommu_dma_masters);
	mutex_unlock(&iommu_dma_notifier_lock);
}

static int __iommu_attach_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_BIND_DRIVER)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (data == master->dev && do_iommu_attach(master->dev,
				master->ops, master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
			break;
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
	return 0;
}

static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	int ret;

	if (!nb)
		return -ENOMEM;

	nb->notifier_call = __iommu_attach_notifier;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
			bus->name);
		kfree(nb);
	}
	return ret;
}

static int __init __iommu_dma_init(void)
{
	int ret;

	ret = iommu_dma_init();
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&amba_bustype);
#ifdef CONFIG_PCI
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
	return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_group *group;

	if (!ops)
		return;
	/*
	 * TODO: As a concession to the future, we're ready to handle being
	 * called both early and late (i.e. after bus_add_device). Once all
	 * the platform bus code is reworked to call us late and the notifier
	 * junk above goes away, move the body of do_iommu_attach here.
	 */
	group = iommu_group_get(dev);
	if (group) {
		do_iommu_attach(dev, ops, dma_base, size);
		iommu_group_put(group);
	} else {
		queue_iommu_attach(dev, ops, dma_base, size);
	}
}

void arch_teardown_dma_ops(struct device *dev)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	if (WARN_ON(domain))
		iommu_detach_device(domain, dev);

	dev->archdata.dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif /* CONFIG_IOMMU_DMA */

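/*
 * Called for each device by the bus/firmware code: default to the swiotlb
 * ops, record the device's coherency, then let __iommu_setup_dma_ops()
 * install iommu_dma_ops when the device sits behind an IOMMU.
 */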
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->archdata.dma_ops)
		dev->archdata.dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}