/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/acpi.h>
#include <linux/bootmem.h>
#include <linux/cache.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>
#include <linux/pci.h>

#include <asm/cacheflush.h>

static int swiotlb __ro_after_init;

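/*
 * Non-coherent devices, and callers asking for DMA_ATTR_WRITE_COMBINE,
 * get a Normal non-cacheable (write-combining) mapping; otherwise the
 * cacheable @prot is returned unchanged.
 */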
static pgprot_t __get_dma_pgprot(unsigned long attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || (attrs & DMA_ATTR_WRITE_COMBINE))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size __initdata = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

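/*
 * Allocate from the pre-mapped atomic pool. Callers in atomic context
 * cannot create new non-cacheable mappings, so they are served from
 * this pool, which is remapped once at init time.
 */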
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

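/*
 * Allocate device-coherent memory: use CMA when a contiguous area is
 * available and the caller may block, otherwise fall back to swiotlb.
 * ZONE_DMA is forced for devices that cannot address all of memory.
 */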
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  unsigned long attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
		struct page *page;
		void *addr;

		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
						 get_order(size), flags);
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				unsigned long attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

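/*
 * Top-level .alloc hook: non-coherent atomic requests are served from
 * the atomic pool; everything else goes through __dma_alloc_coherent()
 * and, for non-coherent devices, additionally gains a non-cacheable
 * kernel alias via dma_common_contiguous_remap().
 */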
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 unsigned long attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);
	pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

	size = PAGE_ALIGN(size);

	if (!coherent && !gfpflags_allow_blocking(flags)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_area(ptr, size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   prot, NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       unsigned long attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	size = PAGE_ALIGN(size);

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

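/*
 * The __swiotlb_* callbacks below wrap the generic swiotlb ops with the
 * cache maintenance (__dma_map_area()/__dma_unmap_area()) required by
 * non-coherent devices. The maintenance is skipped for coherent devices
 * and when DMA_ATTR_SKIP_CPU_SYNC is set.
 */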
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}


static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 unsigned long attrs)
{
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  unsigned long attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     unsigned long attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  unsigned long attrs)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
				 void *cpu_addr, dma_addr_t handle, size_t size,
				 unsigned long attrs)
{
	int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

	if (!ret)
		sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
			    PAGE_ALIGN(size), 0);

	return ret;
}

static int __swiotlb_dma_supported(struct device *hwdev, u64 mask)
{
	if (swiotlb)
		return swiotlb_dma_supported(hwdev, mask);
	return 1;
}

static int __swiotlb_dma_mapping_error(struct device *hwdev, dma_addr_t addr)
{
	if (swiotlb)
		return swiotlb_dma_mapping_error(hwdev, addr);
	return 0;
}

static const struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.get_sgtable = __swiotlb_get_sgtable,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = __swiotlb_dma_supported,
	.mapping_error = __swiotlb_dma_mapping_error,
};

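/*
 * Set up the atomic pool at boot: grab the backing pages (from CMA if
 * available, otherwise ZONE_DMA), zero and flush them, then publish a
 * non-cacheable alias of them through a genalloc pool.
 */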
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order, GFP_KERNEL);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_area(page_addr, atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
			   dma_addr_t *dma_handle, gfp_t flags,
			   unsigned long attrs)
{
	return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
			 void *vaddr, dma_addr_t dma_handle,
			 unsigned long attrs)
{
}

static int __dummy_mmap(struct device *dev,
			struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			unsigned long attrs)
{
	return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
			  int nelems, enum dma_data_direction dir,
			  unsigned long attrs)
{
	return 0;
}

static void __dummy_unmap_sg(struct device *dev,
			     struct scatterlist *sgl, int nelems,
			     enum dma_data_direction dir,
			     unsigned long attrs)
{
}

static void __dummy_sync_single(struct device *dev,
				dma_addr_t dev_addr, size_t size,
				enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
			    struct scatterlist *sgl, int nelems,
			    enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
	return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
	return 0;
}

const struct dma_map_ops dummy_dma_ops = {
	.alloc = __dummy_alloc,
	.free = __dummy_free,
	.mmap = __dummy_mmap,
	.map_page = __dummy_map_page,
	.unmap_page = __dummy_unmap_page,
	.map_sg = __dummy_map_sg,
	.unmap_sg = __dummy_unmap_sg,
	.sync_single_for_cpu = __dummy_sync_single,
	.sync_single_for_device = __dummy_sync_single,
	.sync_sg_for_cpu = __dummy_sync_sg,
	.sync_sg_for_device = __dummy_sync_sg,
	.mapping_error = __dummy_mapping_error,
	.dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

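/*
 * swiotlb bounce buffering is only enabled when it may actually be
 * needed: either it was forced on the command line, or some memory
 * lies above the DMA-addressable physical limit.
 */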
static int __init arm64_dma_init(void)
{
	if (swiotlb_force == SWIOTLB_FORCE ||
	    max_pfn > (arm64_dma_phys_limit >> PAGE_SHIFT))
		swiotlb = 1;

	return atomic_pool_init();
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);


#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
	__dma_flush_area(virt, PAGE_SIZE);
}

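/*
 * IOMMU-backed allocation: blocking callers get an array of pages from
 * iommu_dma_alloc() remapped into contiguous kernel VA; atomic callers
 * must use a physically contiguous buffer (lowmem or the atomic pool)
 * mapped as a single IOVA range.
 */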
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
				 dma_addr_t *handle, gfp_t gfp,
				 unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int ioprot = dma_info_to_prot(DMA_BIDIRECTIONAL, coherent, attrs);
	size_t iosize = size;
	void *addr;

	if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
		return NULL;

	size = PAGE_ALIGN(size);

	/*
	 * Some drivers rely on this, and we probably don't want the
	 * possibility of stale kernel data being read by devices anyway.
	 */
	gfp |= __GFP_ZERO;

	if (gfpflags_allow_blocking(gfp)) {
		struct page **pages;
		pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

		pages = iommu_dma_alloc(dev, iosize, gfp, attrs, ioprot,
					handle, flush_page);
		if (!pages)
			return NULL;

		addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
					      __builtin_return_address(0));
		if (!addr)
			iommu_dma_free(dev, pages, iosize, handle);
	} else {
		struct page *page;
		/*
		 * In atomic context we can't remap anything, so we'll only
		 * get the virtually contiguous buffer we need by way of a
		 * physically contiguous allocation.
		 */
		if (coherent) {
			page = alloc_pages(gfp, get_order(size));
			addr = page ? page_address(page) : NULL;
		} else {
			addr = __alloc_from_pool(size, &page, gfp);
		}
		if (!addr)
			return NULL;

		*handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
		if (iommu_dma_mapping_error(dev, *handle)) {
			if (coherent)
				__free_pages(page, get_order(size));
			else
				__free_from_pool(addr, size);
			addr = NULL;
		}
	}
	return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
			       dma_addr_t handle, unsigned long attrs)
{
	size_t iosize = size;

	size = PAGE_ALIGN(size);
	/*
	 * @cpu_addr will be one of 3 things depending on how it was allocated:
	 * - A remapped array of pages from iommu_dma_alloc(), for all
	 *   non-atomic allocations.
	 * - A non-cacheable alias from the atomic pool, for atomic
	 *   allocations by non-coherent devices.
	 * - A normal lowmem address, for atomic allocations by
	 *   coherent devices.
	 * Hence how dodgy the below logic looks...
	 */
	if (__in_atomic_pool(cpu_addr, size)) {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_from_pool(cpu_addr, size);
	} else if (is_vmalloc_addr(cpu_addr)) {
		struct vm_struct *area = find_vm_area(cpu_addr);

		if (WARN_ON(!area || !area->pages))
			return;
		iommu_dma_free(dev, area->pages, iosize, &handle);
		dma_common_free_remap(cpu_addr, size, VM_USERMAP);
	} else {
		iommu_dma_unmap_page(dev, handle, iosize, 0, 0);
		__free_pages(virt_to_page(cpu_addr), get_order(size));
	}
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr, size_t size,
			      unsigned long attrs)
{
	struct vm_struct *area;
	int ret;

	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	area = find_vm_area(cpu_addr);
	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
			       void *cpu_addr, dma_addr_t dma_addr,
			       size_t size, unsigned long attrs)
{
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct vm_struct *area = find_vm_area(cpu_addr);

	if (WARN_ON(!area || !area->pages))
		return -ENXIO;

	return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
					 GFP_KERNEL);
}

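/*
 * Behind an IOMMU the dma_addr_t is an IOVA, so cache maintenance must
 * first translate it back to a physical address with
 * iommu_iova_to_phys() before applying the map/unmap area helpers.
 */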
static void __iommu_sync_single_for_cpu(struct device *dev,
					dma_addr_t dev_addr, size_t size,
					enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
					   dma_addr_t dev_addr, size_t size,
					   enum dma_data_direction dir)
{
	phys_addr_t phys;

	if (is_device_dma_coherent(dev))
		return;

	phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
	__dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
				   unsigned long offset, size_t size,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);
	int prot = dma_info_to_prot(dir, coherent, attrs);
	dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

	if (!iommu_dma_mapping_error(dev, dev_addr) &&
	    (attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_device(dev, dev_addr, size, dir);

	return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

	iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
				    struct scatterlist *sgl, int nelems,
				    enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
				       struct scatterlist *sgl, int nelems,
				       enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (is_device_dma_coherent(dev))
		return;

	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				int nelems, enum dma_data_direction dir,
				unsigned long attrs)
{
	bool coherent = is_device_dma_coherent(dev);

	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_device(dev, sgl, nelems, dir);

	return iommu_dma_map_sg(dev, sgl, nelems,
				dma_info_to_prot(dir, coherent, attrs));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
				   struct scatterlist *sgl, int nelems,
				   enum dma_data_direction dir,
				   unsigned long attrs)
{
	if ((attrs & DMA_ATTR_SKIP_CPU_SYNC) == 0)
		__iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

	iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static const struct dma_map_ops iommu_dma_ops = {
	.alloc = __iommu_alloc_attrs,
	.free = __iommu_free_attrs,
	.mmap = __iommu_mmap_attrs,
	.get_sgtable = __iommu_get_sgtable,
	.map_page = __iommu_map_page,
	.unmap_page = __iommu_unmap_page,
	.map_sg = __iommu_map_sg_attrs,
	.unmap_sg = __iommu_unmap_sg_attrs,
	.sync_single_for_cpu = __iommu_sync_single_for_cpu,
	.sync_single_for_device = __iommu_sync_single_for_device,
	.sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
	.sync_sg_for_device = __iommu_sync_sg_for_device,
	.map_resource = iommu_dma_map_resource,
	.unmap_resource = iommu_dma_unmap_resource,
	.mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
	struct list_head list;
	struct device *dev;
	const struct iommu_ops *ops;
	u64 dma_base;
	u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

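/*
 * If the IOMMU core has already given the device a group with a default
 * DMA domain, initialise that domain for the given IOVA window and
 * switch the device over to iommu_dma_ops; otherwise warn and leave the
 * platform (swiotlb) ops in place.
 */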
static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			    u64 dma_base, u64 size)
{
	struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

	/*
	 * If the IOMMU driver has the DMA domain support that we require,
	 * then the IOMMU core will have already configured a group for this
	 * device, and allocated the default domain for that group.
	 */
	if (!domain)
		goto out_err;

	if (domain->type == IOMMU_DOMAIN_DMA) {
		if (iommu_dma_init_domain(domain, dma_base, size, dev))
			goto out_err;

		dev->dma_ops = &iommu_dma_ops;
	}

	return true;
out_err:
	pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
		dev_name(dev));
	return false;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
			       u64 dma_base, u64 size)
{
	struct iommu_dma_notifier_data *iommudata;

	iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
	if (!iommudata)
		return;

	iommudata->dev = dev;
	iommudata->ops = ops;
	iommudata->dma_base = dma_base;
	iommudata->size = size;

	mutex_lock(&iommu_dma_notifier_lock);
	list_add(&iommudata->list, &iommu_dma_masters);
	mutex_unlock(&iommu_dma_notifier_lock);
}

static int __iommu_attach_notifier(struct notifier_block *nb,
				   unsigned long action, void *data)
{
	struct iommu_dma_notifier_data *master, *tmp;

	if (action != BUS_NOTIFY_BIND_DRIVER)
		return 0;

	mutex_lock(&iommu_dma_notifier_lock);
	list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
		if (data == master->dev && do_iommu_attach(master->dev,
				master->ops, master->dma_base, master->size)) {
			list_del(&master->list);
			kfree(master);
			break;
		}
	}
	mutex_unlock(&iommu_dma_notifier_lock);
	return 0;
}

static int __init register_iommu_dma_ops_notifier(struct bus_type *bus)
{
	struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
	int ret;

	if (!nb)
		return -ENOMEM;

	nb->notifier_call = __iommu_attach_notifier;

	ret = bus_register_notifier(bus, nb);
	if (ret) {
		pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
			bus->name);
		kfree(nb);
	}
	return ret;
}

static int __init __iommu_dma_init(void)
{
	int ret;

	ret = iommu_dma_init();
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&platform_bus_type);
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&amba_bustype);
#ifdef CONFIG_PCI
	if (!ret)
		ret = register_iommu_dma_ops_notifier(&pci_bus_type);
#endif
	return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *ops)
{
	struct iommu_group *group;

	if (!ops)
		return;
	/*
	 * TODO: As a concession to the future, we're ready to handle being
	 * called both early and late (i.e. after bus_add_device). Once all
	 * the platform bus code is reworked to call us late and the notifier
	 * junk above goes away, move the body of do_iommu_attach here.
	 */
	group = iommu_group_get(dev);
	if (group) {
		do_iommu_attach(dev, ops, dma_base, size);
		iommu_group_put(group);
	} else {
		queue_iommu_attach(dev, ops, dma_base, size);
	}
}

void arch_teardown_dma_ops(struct device *dev)
{
	dev->dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
				  const struct iommu_ops *iommu)
{ }

#endif /* CONFIG_IOMMU_DMA */

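/*
 * Called from the core device code: install the default swiotlb ops if
 * none are set, record coherency, and let __iommu_setup_dma_ops()
 * upgrade the device to iommu_dma_ops where an IOMMU is present.
 */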
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
			const struct iommu_ops *iommu, bool coherent)
{
	if (!dev->dma_ops)
		dev->dma_ops = &swiotlb_dma_ops;

	dev->archdata.dma_coherent = coherent;
	__iommu_setup_dma_ops(dev, dma_base, size, iommu);
}