/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

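/*
 * Pick the page protection for a DMA buffer mapping: non-coherent devices
 * (or an explicit DMA_ATTR_WRITE_COMBINE request) get a Normal non-cacheable
 * (write-combine) mapping so the CPU view matches what the device sees.
 */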
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
                                 bool coherent)
{
        if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                return pgprot_writecombine(prot);
        return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE  SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

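/*
 * The atomic pool size can be overridden on the kernel command line,
 * e.g. "coherent_pool=2M" (parsed by memparse(), so K/M/G suffixes work).
 */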
static int __init early_coherent_pool(char *p)
{
        atomic_pool_size = memparse(p, &p);
        return 0;
}
early_param("coherent_pool", early_coherent_pool);

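/*
 * Atomic (non-blocking) allocations are served from a pre-mapped,
 * non-cacheable gen_pool so that no page-table manipulation or sleeping
 * allocation is needed in atomic context.
 */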
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
        unsigned long val;
        void *ptr = NULL;

        if (!atomic_pool) {
                WARN(1, "coherent pool not initialised!\n");
                return NULL;
        }

        val = gen_pool_alloc(atomic_pool, size);
        if (val) {
                phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

                *ret_page = phys_to_page(phys);
                ptr = (void *)val;
                memset(ptr, 0, size);
        }

        return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
        return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
        if (!__in_atomic_pool(start, size))
                return 0;

        gen_pool_free(atomic_pool, (unsigned long)start, size);

        return 1;
}

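/*
 * Allocate physically contiguous, device-reachable memory: force GFP_DMA for
 * 32-bit coherent masks when ZONE_DMA exists, prefer CMA when a contiguous
 * area is available and the caller may block, otherwise fall back to the
 * swiotlb coherent allocator.
 */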
static void *__dma_alloc_coherent(struct device *dev, size_t size,
                                  dma_addr_t *dma_handle, gfp_t flags,
                                  struct dma_attrs *attrs)
{
        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return NULL;
        }

        if (IS_ENABLED(CONFIG_ZONE_DMA) &&
            dev->coherent_dma_mask <= DMA_BIT_MASK(32))
                flags |= GFP_DMA;
        if (dev_get_cma_area(dev) && gfpflags_allow_blocking(flags)) {
                struct page *page;
                void *addr;

                page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
                                                 get_order(size));
                if (!page)
                        return NULL;

                *dma_handle = phys_to_dma(dev, page_to_phys(page));
                addr = page_address(page);
                memset(addr, 0, size);
                return addr;
        } else {
                return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
        }
}

static void __dma_free_coherent(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle,
                                struct dma_attrs *attrs)
{
        bool freed;
        phys_addr_t paddr = dma_to_phys(dev, dma_handle);

        if (dev == NULL) {
                WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
                return;
        }

        freed = dma_release_from_contiguous(dev,
                                            phys_to_page(paddr),
                                            size >> PAGE_SHIFT);
        if (!freed)
                swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

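/*
 * Top-level .alloc hook for the swiotlb DMA ops. Non-coherent devices in
 * atomic context are served from the atomic pool; otherwise the linear-map
 * alias of the buffer is cleaned and a non-cacheable remapping is returned,
 * while coherent devices can simply use the cacheable kernel address.
 */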
static void *__dma_alloc(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flags,
                         struct dma_attrs *attrs)
{
        struct page *page;
        void *ptr, *coherent_ptr;
        bool coherent = is_device_dma_coherent(dev);
        pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, false);

        size = PAGE_ALIGN(size);

        if (!coherent && !gfpflags_allow_blocking(flags)) {
                struct page *page = NULL;
                void *addr = __alloc_from_pool(size, &page, flags);

                if (addr)
                        *dma_handle = phys_to_dma(dev, page_to_phys(page));

                return addr;
        }

        ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
        if (!ptr)
                goto no_mem;

        /* no need for non-cacheable mapping if coherent */
        if (coherent)
                return ptr;

        /* remove any dirty cache lines on the kernel alias */
        __dma_flush_range(ptr, ptr + size);

        /* create a coherent mapping */
        page = virt_to_page(ptr);
        coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
                                                   prot, NULL);
        if (!coherent_ptr)
                goto no_map;

        return coherent_ptr;

no_map:
        __dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
        *dma_handle = DMA_ERROR_CODE;
        return NULL;
}

static void __dma_free(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle,
                       struct dma_attrs *attrs)
{
        void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

        size = PAGE_ALIGN(size);

        if (!is_device_dma_coherent(dev)) {
                if (__free_from_pool(vaddr, size))
                        return;
                vunmap(vaddr);
        }
        __dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

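/*
 * Streaming DMA: wrap the swiotlb map/unmap/sync implementations with the
 * cache maintenance (clean/invalidate by VA) required for non-coherent
 * devices.
 */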
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
                                     unsigned long offset, size_t size,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        dma_addr_t dev_addr;

        dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

        return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
                                 size_t size, enum dma_data_direction dir,
                                 struct dma_attrs *attrs)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                  int nelems, enum dma_data_direction dir,
                                  struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i, ret;

        ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, ret, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);

        return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
                                     struct scatterlist *sgl, int nelems,
                                     enum dma_data_direction dir,
                                     struct dma_attrs *attrs)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

static void __swiotlb_sync_single_for_cpu(struct device *dev,
                                          dma_addr_t dev_addr, size_t size,
                                          enum dma_data_direction dir)
{
        if (!is_device_dma_coherent(dev))
                __dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
        swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
                                             dma_addr_t dev_addr, size_t size,
                                             enum dma_data_direction dir)
{
        swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
        if (!is_device_dma_coherent(dev))
                __dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
                                      struct scatterlist *sgl, int nelems,
                                      enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                         sg->length, dir);
        swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
                                         struct scatterlist *sgl, int nelems,
                                         enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
        if (!is_device_dma_coherent(dev))
                for_each_sg(sgl, sg, nelems, i)
                        __dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
                                       sg->length, dir);
}

static int __swiotlb_mmap(struct device *dev,
                          struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size,
                          struct dma_attrs *attrs)
{
        int ret = -ENXIO;
        unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
                                        PAGE_SHIFT;
        unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
        unsigned long off = vma->vm_pgoff;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      vma->vm_end - vma->vm_start,
                                      vma->vm_page_prot);
        }

        return ret;
}

static int __swiotlb_get_sgtable(struct device *dev, struct sg_table *sgt,
                                 void *cpu_addr, dma_addr_t handle, size_t size,
                                 struct dma_attrs *attrs)
{
        int ret = sg_alloc_table(sgt, 1, GFP_KERNEL);

        if (!ret)
                sg_set_page(sgt->sgl, phys_to_page(dma_to_phys(dev, handle)),
                            PAGE_ALIGN(size), 0);

        return ret;
}

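/*
 * These ops back the generic DMA API for devices without an IOMMU. As a
 * rough driver-side sketch (illustrative only, not code from this file),
 * a consistent buffer allocated with:
 *
 *	void *cpu;
 *	dma_addr_t dma;
 *
 *	cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 *
 * ends up in __dma_alloc()/__dma_free() above, while dma_map_page() and
 * friends go through the __swiotlb_* streaming callbacks.
 */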
static struct dma_map_ops swiotlb_dma_ops = {
        .alloc = __dma_alloc,
        .free = __dma_free,
        .mmap = __swiotlb_mmap,
        .get_sgtable = __swiotlb_get_sgtable,
        .map_page = __swiotlb_map_page,
        .unmap_page = __swiotlb_unmap_page,
        .map_sg = __swiotlb_map_sg_attrs,
        .unmap_sg = __swiotlb_unmap_sg_attrs,
        .sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
        .sync_single_for_device = __swiotlb_sync_single_for_device,
        .sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
        .sync_sg_for_device = __swiotlb_sync_sg_for_device,
        .dma_supported = swiotlb_dma_supported,
        .mapping_error = swiotlb_dma_mapping_error,
};

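/*
 * Set up the atomic pool at boot: grab a physically contiguous chunk (from
 * CMA if available, otherwise alloc_pages(GFP_DMA)), zero and flush it, remap
 * it Normal-NC and hand it to a gen_pool for __alloc_from_pool() and
 * __free_from_pool() to carve up.
 */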
static int __init atomic_pool_init(void)
{
        pgprot_t prot = __pgprot(PROT_NORMAL_NC);
        unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
        struct page *page;
        void *addr;
        unsigned int pool_size_order = get_order(atomic_pool_size);

        if (dev_get_cma_area(NULL))
                page = dma_alloc_from_contiguous(NULL, nr_pages,
                                                 pool_size_order);
        else
                page = alloc_pages(GFP_DMA, pool_size_order);

        if (page) {
                int ret;
                void *page_addr = page_address(page);

                memset(page_addr, 0, atomic_pool_size);
                __dma_flush_range(page_addr, page_addr + atomic_pool_size);

                atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
                if (!atomic_pool)
                        goto free_page;

                addr = dma_common_contiguous_remap(page, atomic_pool_size,
                                        VM_USERMAP, prot, atomic_pool_init);

                if (!addr)
                        goto destroy_genpool;

                ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
                                        page_to_phys(page),
                                        atomic_pool_size, -1);
                if (ret)
                        goto remove_mapping;

                gen_pool_set_algo(atomic_pool,
                                  gen_pool_first_fit_order_align,
                                  (void *)PAGE_SHIFT);

                pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
                        atomic_pool_size / 1024);
                return 0;
        }
        goto out;

remove_mapping:
        dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
        gen_pool_destroy(atomic_pool);
        atomic_pool = NULL;
free_page:
        if (!dma_release_from_contiguous(NULL, page, nr_pages))
                __free_pages(page, pool_size_order);
out:
        pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
               atomic_pool_size / 1024);
        return -ENOMEM;
}

/********************************************
 * The following APIs are for dummy DMA ops *
 ********************************************/

static void *__dummy_alloc(struct device *dev, size_t size,
                           dma_addr_t *dma_handle, gfp_t flags,
                           struct dma_attrs *attrs)
{
        return NULL;
}

static void __dummy_free(struct device *dev, size_t size,
                         void *vaddr, dma_addr_t dma_handle,
                         struct dma_attrs *attrs)
{
}

static int __dummy_mmap(struct device *dev,
                        struct vm_area_struct *vma,
                        void *cpu_addr, dma_addr_t dma_addr, size_t size,
                        struct dma_attrs *attrs)
{
        return -ENXIO;
}

static dma_addr_t __dummy_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        return DMA_ERROR_CODE;
}

static void __dummy_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
}

static int __dummy_map_sg(struct device *dev, struct scatterlist *sgl,
                          int nelems, enum dma_data_direction dir,
                          struct dma_attrs *attrs)
{
        return 0;
}

static void __dummy_unmap_sg(struct device *dev,
                             struct scatterlist *sgl, int nelems,
                             enum dma_data_direction dir,
                             struct dma_attrs *attrs)
{
}

static void __dummy_sync_single(struct device *dev,
                                dma_addr_t dev_addr, size_t size,
                                enum dma_data_direction dir)
{
}

static void __dummy_sync_sg(struct device *dev,
                            struct scatterlist *sgl, int nelems,
                            enum dma_data_direction dir)
{
}

static int __dummy_mapping_error(struct device *hwdev, dma_addr_t dma_addr)
{
        return 1;
}

static int __dummy_dma_supported(struct device *hwdev, u64 mask)
{
        return 0;
}

struct dma_map_ops dummy_dma_ops = {
        .alloc = __dummy_alloc,
        .free = __dummy_free,
        .mmap = __dummy_mmap,
        .map_page = __dummy_map_page,
        .unmap_page = __dummy_unmap_page,
        .map_sg = __dummy_map_sg,
        .unmap_sg = __dummy_unmap_sg,
        .sync_single_for_cpu = __dummy_sync_single,
        .sync_single_for_device = __dummy_sync_single,
        .sync_sg_for_cpu = __dummy_sync_sg,
        .sync_sg_for_device = __dummy_sync_sg,
        .mapping_error = __dummy_mapping_error,
        .dma_supported = __dummy_dma_supported,
};
EXPORT_SYMBOL(dummy_dma_ops);

static int __init arm64_dma_init(void)
{
        int ret;

        dma_ops = &swiotlb_dma_ops;

        ret = atomic_pool_init();

        return ret;
}
arch_initcall(arm64_dma_init);

#define PREALLOC_DMA_DEBUG_ENTRIES      4096

static int __init dma_debug_do_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
        return 0;
}
fs_initcall(dma_debug_do_init);

#ifdef CONFIG_IOMMU_DMA
#include <linux/dma-iommu.h>
#include <linux/platform_device.h>
#include <linux/amba/bus.h>

/* Thankfully, all cache ops are by VA so we can ignore phys here */
static void flush_page(struct device *dev, const void *virt, phys_addr_t phys)
{
        __dma_flush_range(virt, virt + PAGE_SIZE);
}

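/*
 * IOMMU-backed allocation: blocking callers get an IOVA-contiguous buffer
 * assembled from individual pages by iommu_dma_alloc() and remapped into the
 * vmalloc area; atomic callers fall back to a single physically contiguous
 * allocation (taken from the atomic pool for non-coherent devices) mapped
 * through the IOMMU as one page range.
 */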
static void *__iommu_alloc_attrs(struct device *dev, size_t size,
                                 dma_addr_t *handle, gfp_t gfp,
                                 struct dma_attrs *attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int ioprot = dma_direction_to_prot(DMA_BIDIRECTIONAL, coherent);
        size_t iosize = size;
        void *addr;

        if (WARN(!dev, "cannot create IOMMU mapping for unknown device\n"))
                return NULL;

        size = PAGE_ALIGN(size);

        /*
         * Some drivers rely on this, and we probably don't want the
         * possibility of stale kernel data being read by devices anyway.
         */
        gfp |= __GFP_ZERO;

        if (gfpflags_allow_blocking(gfp)) {
                struct page **pages;
                pgprot_t prot = __get_dma_pgprot(attrs, PAGE_KERNEL, coherent);

                pages = iommu_dma_alloc(dev, iosize, gfp, ioprot, handle,
                                        flush_page);
                if (!pages)
                        return NULL;

                addr = dma_common_pages_remap(pages, size, VM_USERMAP, prot,
                                              __builtin_return_address(0));
                if (!addr)
                        iommu_dma_free(dev, pages, iosize, handle);
        } else {
                struct page *page;
                /*
                 * In atomic context we can't remap anything, so we'll only
                 * get the virtually contiguous buffer we need by way of a
                 * physically contiguous allocation.
                 */
                if (coherent) {
                        page = alloc_pages(gfp, get_order(size));
                        addr = page ? page_address(page) : NULL;
                } else {
                        addr = __alloc_from_pool(size, &page, gfp);
                }
                if (!addr)
                        return NULL;

                *handle = iommu_dma_map_page(dev, page, 0, iosize, ioprot);
                if (iommu_dma_mapping_error(dev, *handle)) {
                        if (coherent)
                                __free_pages(page, get_order(size));
                        else
                                __free_from_pool(addr, size);
                        addr = NULL;
                }
        }
        return addr;
}

static void __iommu_free_attrs(struct device *dev, size_t size, void *cpu_addr,
                               dma_addr_t handle, struct dma_attrs *attrs)
{
        size_t iosize = size;

        size = PAGE_ALIGN(size);
        /*
         * @cpu_addr will be one of 3 things depending on how it was allocated:
         * - A remapped array of pages from iommu_dma_alloc(), for all
         *   non-atomic allocations.
         * - A non-cacheable alias from the atomic pool, for atomic
         *   allocations by non-coherent devices.
         * - A normal lowmem address, for atomic allocations by
         *   coherent devices.
         * Hence how dodgy the below logic looks...
         */
        if (__in_atomic_pool(cpu_addr, size)) {
                iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
                __free_from_pool(cpu_addr, size);
        } else if (is_vmalloc_addr(cpu_addr)) {
                struct vm_struct *area = find_vm_area(cpu_addr);

                if (WARN_ON(!area || !area->pages))
                        return;
                iommu_dma_free(dev, area->pages, iosize, &handle);
                dma_common_free_remap(cpu_addr, size, VM_USERMAP);
        } else {
                iommu_dma_unmap_page(dev, handle, iosize, 0, NULL);
                __free_pages(virt_to_page(cpu_addr), get_order(size));
        }
}

static int __iommu_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
                              void *cpu_addr, dma_addr_t dma_addr, size_t size,
                              struct dma_attrs *attrs)
{
        struct vm_struct *area;
        int ret;

        vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
                                             is_device_dma_coherent(dev));

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        area = find_vm_area(cpu_addr);
        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return iommu_dma_mmap(area->pages, size, vma);
}

static int __iommu_get_sgtable(struct device *dev, struct sg_table *sgt,
                               void *cpu_addr, dma_addr_t dma_addr,
                               size_t size, struct dma_attrs *attrs)
{
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct vm_struct *area = find_vm_area(cpu_addr);

        if (WARN_ON(!area || !area->pages))
                return -ENXIO;

        return sg_alloc_table_from_pages(sgt, area->pages, count, 0, size,
                                         GFP_KERNEL);
}

static void __iommu_sync_single_for_cpu(struct device *dev,
                                        dma_addr_t dev_addr, size_t size,
                                        enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_unmap_area(phys_to_virt(phys), size, dir);
}

static void __iommu_sync_single_for_device(struct device *dev,
                                           dma_addr_t dev_addr, size_t size,
                                           enum dma_data_direction dir)
{
        phys_addr_t phys;

        if (is_device_dma_coherent(dev))
                return;

        phys = iommu_iova_to_phys(iommu_get_domain_for_dev(dev), dev_addr);
        __dma_map_area(phys_to_virt(phys), size, dir);
}

static dma_addr_t __iommu_map_page(struct device *dev, struct page *page,
                                   unsigned long offset, size_t size,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        bool coherent = is_device_dma_coherent(dev);
        int prot = dma_direction_to_prot(dir, coherent);
        dma_addr_t dev_addr = iommu_dma_map_page(dev, page, offset, size, prot);

        if (!iommu_dma_mapping_error(dev, dev_addr) &&
            !dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __iommu_sync_single_for_device(dev, dev_addr, size, dir);

        return dev_addr;
}

static void __iommu_unmap_page(struct device *dev, dma_addr_t dev_addr,
                               size_t size, enum dma_data_direction dir,
                               struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __iommu_sync_single_for_cpu(dev, dev_addr, size, dir);

        iommu_dma_unmap_page(dev, dev_addr, size, dir, attrs);
}

static void __iommu_sync_sg_for_cpu(struct device *dev,
                                    struct scatterlist *sgl, int nelems,
                                    enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_unmap_area(sg_virt(sg), sg->length, dir);
}

static void __iommu_sync_sg_for_device(struct device *dev,
                                       struct scatterlist *sgl, int nelems,
                                       enum dma_data_direction dir)
{
        struct scatterlist *sg;
        int i;

        if (is_device_dma_coherent(dev))
                return;

        for_each_sg(sgl, sg, nelems, i)
                __dma_map_area(sg_virt(sg), sg->length, dir);
}

static int __iommu_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
                                int nelems, enum dma_data_direction dir,
                                struct dma_attrs *attrs)
{
        bool coherent = is_device_dma_coherent(dev);

        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __iommu_sync_sg_for_device(dev, sgl, nelems, dir);

        return iommu_dma_map_sg(dev, sgl, nelems,
                                dma_direction_to_prot(dir, coherent));
}

static void __iommu_unmap_sg_attrs(struct device *dev,
                                   struct scatterlist *sgl, int nelems,
                                   enum dma_data_direction dir,
                                   struct dma_attrs *attrs)
{
        if (!dma_get_attr(DMA_ATTR_SKIP_CPU_SYNC, attrs))
                __iommu_sync_sg_for_cpu(dev, sgl, nelems, dir);

        iommu_dma_unmap_sg(dev, sgl, nelems, dir, attrs);
}

static struct dma_map_ops iommu_dma_ops = {
        .alloc = __iommu_alloc_attrs,
        .free = __iommu_free_attrs,
        .mmap = __iommu_mmap_attrs,
        .get_sgtable = __iommu_get_sgtable,
        .map_page = __iommu_map_page,
        .unmap_page = __iommu_unmap_page,
        .map_sg = __iommu_map_sg_attrs,
        .unmap_sg = __iommu_unmap_sg_attrs,
        .sync_single_for_cpu = __iommu_sync_single_for_cpu,
        .sync_single_for_device = __iommu_sync_single_for_device,
        .sync_sg_for_cpu = __iommu_sync_sg_for_cpu,
        .sync_sg_for_device = __iommu_sync_sg_for_device,
        .dma_supported = iommu_dma_supported,
        .mapping_error = iommu_dma_mapping_error,
};

/*
 * TODO: Right now __iommu_setup_dma_ops() gets called too early to do
 * everything it needs to - the device is only partially created and the
 * IOMMU driver hasn't seen it yet, so it can't have a group. Thus we
 * need this delayed attachment dance. Once IOMMU probe ordering is sorted
 * to move the arch_setup_dma_ops() call later, all the notifier bits below
 * become unnecessary, and will go away.
 */
struct iommu_dma_notifier_data {
        struct list_head list;
        struct device *dev;
        const struct iommu_ops *ops;
        u64 dma_base;
        u64 size;
};
static LIST_HEAD(iommu_dma_masters);
static DEFINE_MUTEX(iommu_dma_notifier_lock);

/*
 * Temporarily "borrow" a domain feature flag to tell if we had to resort
 * to creating our own domain here, in case we need to clean it up again.
 */
#define __IOMMU_DOMAIN_FAKE_DEFAULT             (1U << 31)

static bool do_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                            u64 dma_base, u64 size)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        /*
         * Best case: The device is either part of a group which was
         * already attached to a domain in a previous call, or it's
         * been put in a default DMA domain by the IOMMU core.
         */
        if (!domain) {
                /*
                 * Urgh. The IOMMU core isn't going to do default domains
                 * for non-PCI devices anyway, until it has some means of
                 * abstracting the entirely implementation-specific
                 * sideband data/SoC topology/unicorn dust that may or
                 * may not differentiate upstream masters.
                 * So until then, HORRIBLE HACKS!
                 */
                domain = ops->domain_alloc(IOMMU_DOMAIN_DMA);
                if (!domain)
                        goto out_no_domain;

                domain->ops = ops;
                domain->type = IOMMU_DOMAIN_DMA | __IOMMU_DOMAIN_FAKE_DEFAULT;

                if (iommu_attach_device(domain, dev))
                        goto out_put_domain;
        }

        if (iommu_dma_init_domain(domain, dma_base, size))
                goto out_detach;

        dev->archdata.dma_ops = &iommu_dma_ops;
        return true;

out_detach:
        iommu_detach_device(domain, dev);
out_put_domain:
        if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
                iommu_domain_free(domain);
out_no_domain:
        pr_warn("Failed to set up IOMMU for device %s; retaining platform DMA ops\n",
                dev_name(dev));
        return false;
}

static void queue_iommu_attach(struct device *dev, const struct iommu_ops *ops,
                               u64 dma_base, u64 size)
{
        struct iommu_dma_notifier_data *iommudata;

        iommudata = kzalloc(sizeof(*iommudata), GFP_KERNEL);
        if (!iommudata)
                return;

        iommudata->dev = dev;
        iommudata->ops = ops;
        iommudata->dma_base = dma_base;
        iommudata->size = size;

        mutex_lock(&iommu_dma_notifier_lock);
        list_add(&iommudata->list, &iommu_dma_masters);
        mutex_unlock(&iommu_dma_notifier_lock);
}

static int __iommu_attach_notifier(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct iommu_dma_notifier_data *master, *tmp;

        if (action != BUS_NOTIFY_ADD_DEVICE)
                return 0;

        mutex_lock(&iommu_dma_notifier_lock);
        list_for_each_entry_safe(master, tmp, &iommu_dma_masters, list) {
                if (do_iommu_attach(master->dev, master->ops,
                                    master->dma_base, master->size)) {
                        list_del(&master->list);
                        kfree(master);
                }
        }
        mutex_unlock(&iommu_dma_notifier_lock);
        return 0;
}

static int register_iommu_dma_ops_notifier(struct bus_type *bus)
{
        struct notifier_block *nb = kzalloc(sizeof(*nb), GFP_KERNEL);
        int ret;

        if (!nb)
                return -ENOMEM;
        /*
         * The device must be attached to a domain before the driver probe
         * routine gets a chance to start allocating DMA buffers. However,
         * the IOMMU driver also needs a chance to configure the iommu_group
         * via its add_device callback first, so we need to make the attach
         * happen between those two points. Since the IOMMU core uses a bus
         * notifier with default priority for add_device, do the same but
         * with a lower priority to ensure the appropriate ordering.
         */
        nb->notifier_call = __iommu_attach_notifier;
        nb->priority = -100;

        ret = bus_register_notifier(bus, nb);
        if (ret) {
                pr_warn("Failed to register DMA domain notifier; IOMMU DMA ops unavailable on bus '%s'\n",
                        bus->name);
                kfree(nb);
        }
        return ret;
}

static int __init __iommu_dma_init(void)
{
        int ret;

        ret = iommu_dma_init();
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&platform_bus_type);
        if (!ret)
                ret = register_iommu_dma_ops_notifier(&amba_bustype);
        return ret;
}
arch_initcall(__iommu_dma_init);

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  const struct iommu_ops *ops)
{
        struct iommu_group *group;

        if (!ops)
                return;
        /*
         * TODO: As a concession to the future, we're ready to handle being
         * called both early and late (i.e. after bus_add_device). Once all
         * the platform bus code is reworked to call us late and the notifier
         * junk above goes away, move the body of do_iommu_attach here.
         */
        group = iommu_group_get(dev);
        if (group) {
                do_iommu_attach(dev, ops, dma_base, size);
                iommu_group_put(group);
        } else {
                queue_iommu_attach(dev, ops, dma_base, size);
        }
}

void arch_teardown_dma_ops(struct device *dev)
{
        struct iommu_domain *domain = iommu_get_domain_for_dev(dev);

        if (domain) {
                iommu_detach_device(domain, dev);
                if (domain->type & __IOMMU_DOMAIN_FAKE_DEFAULT)
                        iommu_domain_free(domain);
        }

        dev->archdata.dma_ops = NULL;
}

#else

static void __iommu_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                                  struct iommu_ops *iommu)
{ }

#endif  /* CONFIG_IOMMU_DMA */

void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
                        struct iommu_ops *iommu, bool coherent)
{
        if (!acpi_disabled && !dev->archdata.dma_ops)
                dev->archdata.dma_ops = dma_ops;

        dev->archdata.dma_coherent = coherent;
        __iommu_setup_dma_ops(dev, dma_base, size, iommu);
}