/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/genalloc.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

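/*
 * Pick the page protection for a DMA mapping: non-coherent devices (and
 * DMA_ATTR_WRITE_COMBINE requests) get a write-combining, non-cacheable
 * mapping; fully coherent devices keep the protection passed in.
 */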
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}

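/*
 * Atomic allocations (no __GFP_WAIT) cannot sleep, so they are served
 * from this pre-allocated, pre-remapped pool instead of CMA or the page
 * allocator. The pool defaults to 256 KiB and can be resized with the
 * "coherent_pool=" boot parameter below.
 */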
static struct gen_pool *atomic_pool;

#define DEFAULT_DMA_COHERENT_POOL_SIZE	SZ_256K
static size_t atomic_pool_size = DEFAULT_DMA_COHERENT_POOL_SIZE;

static int __init early_coherent_pool(char *p)
{
	atomic_pool_size = memparse(p, &p);
	return 0;
}
early_param("coherent_pool", early_coherent_pool);

static void *__alloc_from_pool(size_t size, struct page **ret_page)
{
	unsigned long val;
	void *ptr = NULL;

	if (!atomic_pool) {
		WARN(1, "coherent pool not initialised!\n");
		return NULL;
	}

	val = gen_pool_alloc(atomic_pool, size);
	if (val) {
		phys_addr_t phys = gen_pool_virt_to_phys(atomic_pool, val);

		*ret_page = phys_to_page(phys);
		ptr = (void *)val;
	}

	return ptr;
}

static bool __in_atomic_pool(void *start, size_t size)
{
	return addr_in_gen_pool(atomic_pool, (unsigned long)start, size);
}

static int __free_from_pool(void *start, size_t size)
{
	if (!__in_atomic_pool(start, size))
		return 0;

	gen_pool_free(atomic_pool, (unsigned long)start, size);

	return 1;
}

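/*
 * "Coherent" allocation: memory is returned through the kernel linear
 * mapping. Devices limited to 32-bit DMA are steered into ZONE_DMA;
 * blocking callers are satisfied from CMA when it is enabled, and
 * everything else falls back to the swiotlb coherent allocator.
 */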
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA) && (flags & __GFP_WAIT)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	bool freed;
	phys_addr_t paddr = dma_to_phys(dev, dma_handle);

	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	freed = dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	if (!freed)
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}

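/*
 * Non-coherent allocation: grab pages as above, clean+invalidate the
 * cacheable linear-map alias, then hand out a second, non-cacheable
 * mapping created with dma_common_contiguous_remap(). Atomic callers
 * are routed to the pre-mapped atomic pool instead.
 */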
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	struct page *page;
	void *ptr, *coherent_ptr;

	size = PAGE_ALIGN(size);

	if (!(flags & __GFP_WAIT)) {
		struct page *page = NULL;
		void *addr = __alloc_from_pool(size, &page);

		if (addr)
			*dma_handle = phys_to_dma(dev, page_to_phys(page));

		return addr;
	}

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	coherent_ptr = dma_common_contiguous_remap(page, size, VM_USERMAP,
				__get_dma_pgprot(attrs,
					__pgprot(PROT_NORMAL_NC), false),
				NULL);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = DMA_ERROR_CODE;
	return NULL;
}

static void __dma_free_noncoherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	if (__free_from_pool(vaddr, size))
		return;
	vunmap(vaddr);
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

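/*
 * Streaming DMA for non-coherent devices: let swiotlb handle bounce
 * buffering and address translation, and add the cache maintenance
 * (__dma_map_area()/__dma_unmap_area()) that the generic code omits.
 */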
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

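/* Scatter-gather variants: the same cache maintenance, per sg entry. */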
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	for_each_sg(sgl, sg, ret, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

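/*
 * The sync hooks only perform cache maintenance; copying to and from
 * any bounce buffer happens inside the swiotlb sync routines.
 */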
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);
}

/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

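/*
 * The two mmap hooks differ only in vm_page_prot: the non-coherent
 * variant downgrades the user mapping to write-combine, while the
 * coherent one keeps whatever protection the caller set up.
 */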
static int __swiotlb_mmap_noncoherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

static int __swiotlb_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	/* Just use whatever page_prot attributes were specified */
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

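/*
 * Two complete dma_map_ops tables: one for devices that are not
 * hardware-coherent (extra cache maintenance plus remapped CPU
 * mappings) and one for coherent devices, which can use the swiotlb
 * helpers directly.
 */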
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_noncoherent,
	.free = __dma_free_noncoherent,
	.mmap = __swiotlb_mmap_noncoherent,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);

struct dma_map_ops coherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_coherent,
	.free = __dma_free_coherent,
	.mmap = __swiotlb_mmap_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);

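/*
 * Illustrative only: drivers never reference these tables directly but
 * go through the generic DMA API, which dispatches via get_dma_ops().
 * A typical (hypothetical) driver sequence would be:
 *
 *	void *buf;
 *	dma_addr_t handle;
 *
 *	buf = dma_alloc_coherent(dev, SZ_4K, &handle, GFP_KERNEL);
 *	if (buf) {
 *		...		// program the device with "handle"
 *		dma_free_coherent(dev, SZ_4K, buf, handle);
 *	}
 */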
extern int swiotlb_late_init_with_default_size(size_t default_size);

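/*
 * Carve out the atomic pool at boot: allocate the backing pages from
 * CMA (or GFP_DMA pages), zero and flush them, remap them non-cacheable
 * and publish the range through a genpool so __alloc_from_pool() can
 * hand out pieces of it.
 */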
static int __init atomic_pool_init(void)
{
	pgprot_t prot = __pgprot(PROT_NORMAL_NC);
	unsigned long nr_pages = atomic_pool_size >> PAGE_SHIFT;
	struct page *page;
	void *addr;
	unsigned int pool_size_order = get_order(atomic_pool_size);

	if (dev_get_cma_area(NULL))
		page = dma_alloc_from_contiguous(NULL, nr_pages,
						 pool_size_order);
	else
		page = alloc_pages(GFP_DMA, pool_size_order);

	if (page) {
		int ret;
		void *page_addr = page_address(page);

		memset(page_addr, 0, atomic_pool_size);
		__dma_flush_range(page_addr, page_addr + atomic_pool_size);

		atomic_pool = gen_pool_create(PAGE_SHIFT, -1);
		if (!atomic_pool)
			goto free_page;

		addr = dma_common_contiguous_remap(page, atomic_pool_size,
					VM_USERMAP, prot, atomic_pool_init);

		if (!addr)
			goto destroy_genpool;

		ret = gen_pool_add_virt(atomic_pool, (unsigned long)addr,
					page_to_phys(page),
					atomic_pool_size, -1);
		if (ret)
			goto remove_mapping;

		gen_pool_set_algo(atomic_pool,
				  gen_pool_first_fit_order_align,
				  (void *)PAGE_SHIFT);

		pr_info("DMA: preallocated %zu KiB pool for atomic allocations\n",
			atomic_pool_size / 1024);
		return 0;
	}
	goto out;

remove_mapping:
	dma_common_free_remap(addr, atomic_pool_size, VM_USERMAP);
destroy_genpool:
	gen_pool_destroy(atomic_pool);
	atomic_pool = NULL;
free_page:
	if (!dma_release_from_contiguous(NULL, page, nr_pages))
		__free_pages(page, pool_size_order);
out:
	pr_err("DMA: failed to allocate %zu KiB pool for atomic coherent allocation\n",
	       atomic_pool_size / 1024);
	return -ENOMEM;
}

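/*
 * swiotlb is brought up late (capped at 64 MiB or MAX_ORDER_NR_PAGES)
 * and the non-coherent ops are installed as the system-wide default;
 * devices known to be coherent select coherent_swiotlb_dma_ops instead.
 */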
static int __init swiotlb_late_init(void)
{
	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);

	dma_ops = &noncoherent_swiotlb_dma_ops;

	return swiotlb_late_init_with_default_size(swiotlb_size);
}

static int __init arm64_dma_init(void)
{
	int ret = 0;

	ret |= swiotlb_late_init();
	ret |= atomic_pool_init();

	return ret;
}
arch_initcall(arm64_dma_init);

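/* Pre-allocate tracking entries for the CONFIG_DMA_API_DEBUG facility. */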
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);