/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

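/*
 * Decide the pgprot for a coherent mapping: non-coherent devices (and
 * DMA_ATTR_WRITE_COMBINE requests) get a Normal-NC (write-combine)
 * mapping, while fully coherent devices keep the cacheable attributes.
 */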
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
                return pgprot_writecombine(prot);
        return prot;
}

static struct gen_pool *atomic_pool __ro_after_init;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

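/*
 * The atomic pool backs non-blocking, non-coherent allocations. Its size
 * defaults to 256 KiB and can be overridden with the "coherent_pool="
 * kernel command-line parameter (parsed by memparse(), so size suffixes
 * such as "1M" work).
 */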
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

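/*
 * Carve a zeroed region out of the atomic pool. Safe in atomic context:
 * the pool memory was remapped non-cacheable at init time, so no
 * remapping (and hence no sleeping) is needed here. The backing page is
 * returned via @ret_page so callers can derive the dma_addr_t.
 */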
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}

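/*
 * Allocate the backing memory for a coherent buffer: prefer CMA when a
 * contiguous area is available and the caller may block, otherwise fall
 * back to swiotlb. Devices limited to 32-bit coherent masks are steered
 * into ZONE_DMA.
 */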
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  unsigned long attrs)
{
        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size), flags);
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                unsigned long attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        freed = dma_release_from_contiguous(dev,
                                            phys_to_page(paddr),
                                            size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

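/*
 * Top-level ->alloc hook. Non-blocking requests from non-coherent devices
 * are served from the pre-mapped atomic pool; everything else gets backing
 * pages from __dma_alloc_coherent() and, for non-coherent devices, a
 * second non-cacheable kernel alias created with
 * dma_common_contiguous_remap() after flushing the cacheable alias.
 */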
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         unsigned long attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

        size = PAGE_ALIGN(size);

        if (!coherent && !gfpflags_allow_blocking(flags)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_area(ptr, size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot, NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        return NULL;
}

static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       unsigned long attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

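/*
 * Streaming DMA wrappers: delegate the actual (possibly bounce-buffered)
 * mapping to swiotlb, and add the cache maintenance that non-coherent
 * devices need unless the caller opted out with DMA_ATTR_SKIP_CPU_SYNC.
 */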
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 unsigned long attrs)
{
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  unsigned long attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     unsigned long attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

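/*
 * The sync hooks order cache maintenance around the swiotlb copies:
 * __dma_unmap_area() runs before ownership returns to the CPU, and
 * __dma_map_area() runs after the swiotlb copy so the device observes
 * up-to-date data.
 */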
static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}

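/*
 * Map a coherent buffer's pages into a userspace VMA. vma->vm_pgoff is
 * treated as an offset into the buffer, and the request is rejected if
 * the VMA would extend past the end of the buffer.
 */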
static int __swiotlb_mmap_pfn(struct vm_area_struct *vma,
                              unsigned long pfn, size_t size)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = vma_pages(vma);
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}

static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          unsigned long attrs)
{
        int ret;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        return __swiotlb_mmap_pfn(vma, pfn, size);
}

static int __swiotlb_get_sgtable_page(struct sg_table *sgt,
                                      struct page *page, size_t size)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);

        return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
                                 unsigned long attrs)
{
        struct page *page = phys_to_page(dma_to_phys(dev, handle));

        return __swiotlb_get_sgtable_page(sgt, page, size);
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
        if (swiotlb)
                return swiotlb_dma_supported(hwdev, mask);
        return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
        if (swiotlb)
                return swiotlb_dma_mapping_error(hwdev, addr);
        return 0;
}

static const struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .get_sgtable = __swiotlb_get_sgtable,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = __swiotlb_dma_supported,
        .mapping_error = __swiotlb_dma_mapping_error,
};

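/*
 * Set up the atomic pool at boot: carve the memory out of CMA (or GFP_DMA
 * pages as a fallback), flush it, remap it non-cacheable and hand it to a
 * genpool so that __alloc_from_pool() can serve atomic allocations.
 */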
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order, GFP_KERNEL);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_area(page_addr, atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  NULL);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           unsigned long attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        unsigned long attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        return 0;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          unsigned long attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}

const struct dma_map_ops dummy_dma_ops = {
        .alloc = __dummy_alloc,
        .free = __dummy_free,
        .mmap = __dummy_mmap,
        .map_page = __dummy_map_page,
        .unmap_page = __dummy_unmap_page,
        .map_sg = __dummy_map_sg,
        .unmap_sg = __dummy_unmap_sg,
        .sync_single_for_cpu = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu = __dummy_sync_sg,
        .sync_sg_for_device = __dummy_sync_sg,
        .mapping_error = __dummy_mapping_error,
        .dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

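/*
 * swiotlb bouncing is only enabled when it can actually be required:
 * either forced on the command line ("swiotlb=force") or because some
 * physical memory lies above arm64_dma_phys_limit and may be unreachable
 * for devices with 32-bit DMA masks.
 */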
static int __init arm64_dma_init(void)
{
        if (swiotlb_force == SWIOTLB_FORCE ||
            max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
                swiotlb = 1;

        return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_area(virt, PAGE_SIZE);
}

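/*
 * IOMMU-backed ->alloc. Three strategies, chosen by context:
 *  - atomic: physically contiguous memory (lowmem or the atomic pool),
 *    since nothing can be remapped without sleeping;
 *  - DMA_ATTR_FORCE_CONTIGUOUS: a single CMA allocation, remapped;
 *  - default: scattered pages from iommu_dma_alloc(), made IOVA-contiguous
 *    by the IOMMU and VA-contiguous by dma_common_pages_remap().
 */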
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
        size_t iosize = size;
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;

        size = PAGE_ALIGN(size);

        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (!gfpflags_allow_blocking(gfp)) {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = __alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                __free_from_pool(addr, size);
                        addr = NULL;
                }
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
                struct page *page;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size), gfp);
                if (!page)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                        return NULL;
                }
                if (!coherent)
                        __dma_flush_area(page_to_virt(page), iosize);

                addr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot,
                                                   __builtin_return_address(0));
                if (!addr) {
                        iommu_dma_unmap_page(dev, *handle, iosize, 0, attrs);
                        dma_release_from_contiguous(dev, page,
                                                    size >> PAGE_SHIFT);
                }
        } else {
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);
                struct page **pages;

                pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
                                        handle, flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, iosize, handle);
        }
        return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, unsigned long attrs)
{
        size_t iosize = size;

        size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 4 things depending on how it was allocated:
         * - A remapped array of pages for contiguous allocations.
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_from_pool(cpu_addr, size);
        } else if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                struct page *page = vmalloc_to_page(cpu_addr);

                iommu_dma_unmap_page(dev, handle, iosize, 0, attrs);
                dma_release_from_contiguous(dev, page, size >> PAGE_SHIFT);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              unsigned long attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                unsigned long pfn = vmalloc_to_pfn(cpu_addr);
                return __swiotlb_mmap_pfn(vma, pfn, size);
        }

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, unsigned long attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (attrs & DMA_ATTR_FORCE_CONTIGUOUS) {
                /*
                 * DMA_ATTR_FORCE_CONTIGUOUS allocations are always remapped,
                 * hence in the vmalloc space.
                 */
                struct page *page = vmalloc_to_page(cpu_addr);
                return __swiotlb_get_sgtable_page(sgt, page, size);
        }

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}

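/*
 * With an IOMMU in the way, dma_addr_t values are IOVAs: cache maintenance
 * must first translate back to a physical address through the device's
 * IOMMU domain before operating on the kernel linear-map alias.
 */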
static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int prot = dma_info_to_prot(dir, coherent, attrs);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!iommu_dma_mapping_error(dev, dev_addr) &&
            (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_device(dev, dev_addr, size, dir);

        return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                unsigned long attrs)
{
        bool coherent = is_device_dma_coherent(dev);

        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   unsigned long attrs)
{
        if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .map_resource = iommu_dma_map_resource,
        .unmap_resource = iommu_dma_unmap_resource,
        .mapping_error = iommu_dma_mapping_error,
};

static int __init __iommu_dma_init(void)
{
        return iommu_dma_init();
}
arch_initcall(__iommu_dma_init);

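/*
 * Install iommu_dma_ops on a device whose IOMMU provides a default DMA
 * domain; devices without one keep whatever ops were installed before.
 */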
static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_domain *domain;

        if (!ops)
                return;

        /*
         * The IOMMU core code allocates the default DMA domain, which the
         * underlying IOMMU driver needs to support via the dma-iommu layer.
         */
        domain = iommu_get_domain_for_dev(dev);

        if (!domain)
                goto out_err;

        if (domain->type == IOMMU_DOMAIN_DMA) {
                if (iommu_dma_init_domain(domain, dma_base, size, dev))
                        goto out_err;

                dev->dma_ops = &iommu_dma_ops;
        }

        return;

out_err:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
}

void arch_teardown_dma_ops(struct device *dev)
{
        dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

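/*
 * Entry point from the firmware (DT/ACPI) DMA configuration code: start
 * from the swiotlb ops, upgrade to the IOMMU ops where a DMA domain is
 * available, and let Xen interpose its own ops when running as the
 * initial domain.
 */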
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        const struct iommu_ops *iommu, bool coherent)
{
        if (!dev->dma_ops)
                dev->dma_ops = &swiotlb_dma_ops;

        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);

#ifdef CONFIG_XEN
        if (xen_initial_domain()) {
                dev->archdata.dev_dma_ops = dev->dma_ops;
                dev->dma_ops = xen_dma_ops;
        }
#endif
}