/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

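/*
 * Select the page protection for a DMA mapping: non-coherent devices,
 * and callers that ask for DMA_ATTR_WRITE_COMBINE, get a write-combining
 * (Normal non-cacheable) mapping; coherent devices keep @prot unchanged.
 */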
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}

static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

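/*
 * The atomic pool size can be overridden on the kernel command line,
 * e.g. "coherent_pool=2M"; memparse() accepts the usual K/M/G suffixes.
 */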
static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

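/*
 * Hand out a zeroed, pre-remapped non-cacheable buffer from the atomic
 * pool.  Safe in atomic context: nothing here can sleep.
 */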
static void *__alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
		memset(ptr, 0, size);
	}

	return ptr;
}

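/*
 * __free_from_pool() returns 1 if the range came from the atomic pool
 * and was returned to it, 0 if it should be freed by other means.
 */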
static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

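/*
 * Allocate physically contiguous memory that the device can address:
 * from CMA when the caller may sleep, otherwise via swiotlb.  GFP_DMA
 * is forced for devices limited to 32-bit DMA when a DMA zone exists.
 */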
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;
		void *addr;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		addr = page_address(page);
		memset(addr, 0, size);
		return addr;
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

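/*
 * Counterpart of __dma_alloc_coherent(): try CMA first and fall back
 * to swiotlb if the pages did not come from the contiguous area.
 */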
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

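/*
 * Top-level .alloc hook.  Atomic, non-coherent requests are served from
 * the pre-remapped atomic pool; everything else is allocated with
 * __dma_alloc_coherent() and, for non-coherent devices, remapped
 * non-cacheable so CPU and device see the same memory attributes.
 */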
static void *__dma_alloc(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flags,
			 struct dma_attrs *attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;
	bool coherent = is_device_dma_coherent(dev);

	size = PAGE_ALIGN(size);

	if (!coherent && !(flags & __GFP_WAIT)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page, flags);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* no need for non-cacheable mapping if coherent */
	if (coherent)
		return ptr;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
						   __get_dma_pgprot(attrs,
							__pgprot(PROT_NORMAL_NC), false),
						   NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

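/*
 * Top-level .free hook.  @vaddr may be an atomic-pool pointer or a
 * vmap()ed non-cacheable alias; the underlying pages are always freed
 * through the linear-map address derived from @dma_handle.
 */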
static void __dma_free(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle,
		       struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	if (!is_device_dma_coherent(dev)) {
		if (__free_from_pool(vaddr, size))
			return;
		vunmap(vaddr);
	}
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

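/*
 * Streaming DMA: the wrappers below defer to swiotlb (which may bounce
 * the buffer) and add the CPU cache maintenance that non-coherent
 * devices require around each transfer.
 */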
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

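/*
 * Scatterlist variants of the above.  On map, the maintenance loop
 * walks the entry count returned by swiotlb, using each entry's
 * dma_address since the data may live in a bounce buffer.
 */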
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, ret, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

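/*
 * sync_*_for_cpu makes the device's writes visible before the CPU reads
 * the buffer; sync_*_for_device publishes the CPU's writes before the
 * device accesses it.  Maintenance is applied to the device-visible
 * address, which may be a swiotlb bounce buffer.
 */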
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	if (!is_device_dma_coherent(dev))
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	if (!is_device_dma_coherent(dev))
		__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
					 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	if (!is_device_dma_coherent(dev))
		for_each_sg(sgl, sg, nelems, i)
			__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				       sg->length, dir);
}

/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap(struct device *dev,
			  struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size,
			  struct dma_attrs *attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot,
					     is_device_dma_coherent(dev));
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

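/*
 * These ops back the generic DMA API and are not called directly by
 * drivers.  A typical consumer (sketch only; "pdev" stands in for
 * whatever device a real driver holds) would do:
 *
 *	void *cpu;
 *	dma_addr_t handle;
 *
 *	cpu = dma_alloc_coherent(&pdev->dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, SZ_4K, cpu, handle);
 */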
static struct dma_map_ops swiotlb_dma_ops = {
	.alloc = __dma_alloc,
	.free = __dma_free,
	.mmap = __swiotlb_mmap,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};

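/*
 * Carve out and remap the atomic pool at boot: allocate from CMA if
 * present (GFP_DMA pages otherwise), zero and flush it, then publish it
 * through a genpool so __alloc_from_pool() can hand out sub-ranges.
 */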
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_range(page_addr, page_addr + atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

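/* Wire up the swiotlb-backed ops and reserve the atomic pool at boot. */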
static int __init arm64_dma_init(void)
{
	int ret;

	dma_ops = &swiotlb_dma_ops;

	ret = atomic_pool_init();

	return ret;
}
arch_initcall(arm64_dma_init);

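/*
 * Register preallocated entries with the DMA API debugging code
 * (a no-op unless CONFIG_DMA_API_DEBUG is enabled).
 */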
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);