/*
 * SWIOTLB-based DMA API implementation
 *
 * Copyright (C) 2012 ARM Ltd.
 * Author: Catalin Marinas <catalin.marinas@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/gfp.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/dma-contiguous.h>
#include <linux/vmalloc.h>
#include <linux/swiotlb.h>

#include <asm/cacheflush.h>

struct dma_map_ops *dma_ops;
EXPORT_SYMBOL(dma_ops);

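/*
 * Mappings of non-coherent buffers must be writecombine (Normal
 * non-cacheable) so that CPU and device agree on the contents; coherent
 * buffers keep the caller's attributes unless DMA_ATTR_WRITE_COMBINE
 * asks for writecombine explicitly.
 */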
static pgprot_t __get_dma_pgprot(struct dma_attrs *attrs, pgprot_t prot,
				 bool coherent)
{
	if (!coherent || dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
		return pgprot_writecombine(prot);
	return prot;
}

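/*
 * Allocate from CMA when available, otherwise fall back to the swiotlb
 * allocator. Devices limited to a 32-bit coherent mask must be served
 * from ZONE_DMA.
 */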
static void *__dma_alloc_coherent(struct device *dev, size_t size,
				  dma_addr_t *dma_handle, gfp_t flags,
				  struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return NULL;
	}

	if (IS_ENABLED(CONFIG_ZONE_DMA) &&
	    dev->coherent_dma_mask <= DMA_BIT_MASK(32))
		flags |= GFP_DMA;
	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		struct page *page;

		size = PAGE_ALIGN(size);
		page = dma_alloc_from_contiguous(dev, size >> PAGE_SHIFT,
							get_order(size));
		if (!page)
			return NULL;

		*dma_handle = phys_to_dma(dev, page_to_phys(page));
		return page_address(page);
	} else {
		return swiotlb_alloc_coherent(dev, size, dma_handle, flags);
	}
}

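/*
 * The release path must mirror the allocator above: pages that came
 * from CMA go back to the contiguous pool, everything else is returned
 * to the swiotlb allocator.
 */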
static void __dma_free_coherent(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle,
				struct dma_attrs *attrs)
{
	if (dev == NULL) {
		WARN_ONCE(1, "Use an actual device structure for DMA allocation\n");
		return;
	}

	if (IS_ENABLED(CONFIG_DMA_CMA)) {
		phys_addr_t paddr = dma_to_phys(dev, dma_handle);

		dma_release_from_contiguous(dev,
					phys_to_page(paddr),
					size >> PAGE_SHIFT);
	} else {
		swiotlb_free_coherent(dev, size, vaddr, dma_handle);
	}
}

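/*
 * Non-coherent allocation: grab pages through the coherent allocator,
 * flush any dirty lines from the cacheable kernel alias, then hand the
 * caller a second, non-cacheable vmap alias of the same pages.
 */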
static void *__dma_alloc_noncoherent(struct device *dev, size_t size,
				     dma_addr_t *dma_handle, gfp_t flags,
				     struct dma_attrs *attrs)
{
	struct page *page, **map;
	void *ptr, *coherent_ptr;
	int order, i;

	size = PAGE_ALIGN(size);
	order = get_order(size);

	ptr = __dma_alloc_coherent(dev, size, dma_handle, flags, attrs);
	if (!ptr)
		goto no_mem;
	map = kmalloc(sizeof(struct page *) << order, flags & ~GFP_DMA);
	if (!map)
		goto no_map;

	/* remove any dirty cache lines on the kernel alias */
	__dma_flush_range(ptr, ptr + size);

	/* create a coherent mapping */
	page = virt_to_page(ptr);
	for (i = 0; i < (size >> PAGE_SHIFT); i++)
		map[i] = page + i;
	coherent_ptr = vmap(map, size >> PAGE_SHIFT, VM_MAP,
			    __get_dma_pgprot(attrs, __pgprot(PROT_NORMAL_NC), false));
	kfree(map);
	if (!coherent_ptr)
		goto no_map;

	return coherent_ptr;

no_map:
	__dma_free_coherent(dev, size, ptr, *dma_handle, attrs);
no_mem:
	*dma_handle = ~0;
	return NULL;
}

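/*
 * The caller's pointer is the vmap alias, so recover the original
 * kernel-linear address from the DMA handle before freeing the pages.
 */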
static void __dma_free_noncoherent(struct device *dev, size_t size,
				   void *vaddr, dma_addr_t dma_handle,
				   struct dma_attrs *attrs)
{
	void *swiotlb_addr = phys_to_virt(dma_to_phys(dev, dma_handle));

	vunmap(vaddr);
	__dma_free_coherent(dev, size, swiotlb_addr, dma_handle, attrs);
}

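/*
 * Streaming DMA for the non-coherent case: swiotlb provides the
 * (possibly bounced) device address, and explicit cache maintenance on
 * that address makes the buffer visible to the device on map and back
 * to the CPU on unmap.
 */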
static dma_addr_t __swiotlb_map_page(struct device *dev, struct page *page,
				     unsigned long offset, size_t size,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	dma_addr_t dev_addr;

	dev_addr = swiotlb_map_page(dev, page, offset, size, dir, attrs);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);

	return dev_addr;
}

static void __swiotlb_unmap_page(struct device *dev, dma_addr_t dev_addr,
				 size_t size, enum dma_data_direction dir,
				 struct dma_attrs *attrs)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_unmap_page(dev, dev_addr, size, dir, attrs);
}

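/*
 * Scatterlist variants apply the same maintenance per entry, walking
 * the list with for_each_sg() and using the DMA address that swiotlb
 * assigned to each segment.
 */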
static int __swiotlb_map_sg_attrs(struct device *dev, struct scatterlist *sgl,
				  int nelems, enum dma_data_direction dir,
				  struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i, ret;

	ret = swiotlb_map_sg_attrs(dev, sgl, nelems, dir, attrs);
	for_each_sg(sgl, sg, ret, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);

	return ret;
}

static void __swiotlb_unmap_sg_attrs(struct device *dev,
				     struct scatterlist *sgl, int nelems,
				     enum dma_data_direction dir,
				     struct dma_attrs *attrs)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_unmap_sg_attrs(dev, sgl, nelems, dir, attrs);
}

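/*
 * The sync hooks repeat the map/unmap cache maintenance so a buffer
 * can pass between CPU and device ownership without being remapped.
 */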
static void __swiotlb_sync_single_for_cpu(struct device *dev,
					  dma_addr_t dev_addr, size_t size,
					  enum dma_data_direction dir)
{
	__dma_unmap_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
	swiotlb_sync_single_for_cpu(dev, dev_addr, size, dir);
}

static void __swiotlb_sync_single_for_device(struct device *dev,
					     dma_addr_t dev_addr, size_t size,
					     enum dma_data_direction dir)
{
	swiotlb_sync_single_for_device(dev, dev_addr, size, dir);
	__dma_map_area(phys_to_virt(dma_to_phys(dev, dev_addr)), size, dir);
}

static void __swiotlb_sync_sg_for_cpu(struct device *dev,
				      struct scatterlist *sgl, int nelems,
				      enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	for_each_sg(sgl, sg, nelems, i)
		__dma_unmap_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
				 sg->length, dir);
	swiotlb_sync_sg_for_cpu(dev, sgl, nelems, dir);
}

static void __swiotlb_sync_sg_for_device(struct device *dev,
					 struct scatterlist *sgl, int nelems,
					 enum dma_data_direction dir)
{
	struct scatterlist *sg;
	int i;

	swiotlb_sync_sg_for_device(dev, sgl, nelems, dir);
	for_each_sg(sgl, sg, nelems, i)
		__dma_map_area(phys_to_virt(dma_to_phys(dev, sg->dma_address)),
			       sg->length, dir);
}

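/*
 * Shared mmap helper: tries the per-device coherent pool first, then
 * remaps the underlying pfn range, rejecting vmas that extend beyond
 * the allocated buffer.
 */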
/* vma->vm_page_prot must be set appropriately before calling this function */
static int __dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			     void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
	unsigned long nr_vma_pages = (vma->vm_end - vma->vm_start) >>
					PAGE_SHIFT;
	unsigned long nr_pages = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long pfn = dma_to_phys(dev, dma_addr) >> PAGE_SHIFT;
	unsigned long off = vma->vm_pgoff;

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < nr_pages && nr_vma_pages <= (nr_pages - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      vma->vm_end - vma->vm_start,
				      vma->vm_page_prot);
	}

	return ret;
}

static int __swiotlb_mmap_noncoherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	vma->vm_page_prot = __get_dma_pgprot(attrs, vma->vm_page_prot, false);
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

static int __swiotlb_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs)
{
	/* Just use whatever page_prot attributes were specified */
	return __dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

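/*
 * Two dma_map_ops tables: the noncoherent one wraps every swiotlb
 * operation with explicit cache maintenance, the coherent one calls
 * swiotlb directly. Drivers never pick a table themselves; they use
 * the generic DMA API, which dispatches through dma_ops, e.g.:
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, size, &handle, GFP_KERNEL);
 */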
struct dma_map_ops noncoherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_noncoherent,
	.free = __dma_free_noncoherent,
	.mmap = __swiotlb_mmap_noncoherent,
	.map_page = __swiotlb_map_page,
	.unmap_page = __swiotlb_unmap_page,
	.map_sg = __swiotlb_map_sg_attrs,
	.unmap_sg = __swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = __swiotlb_sync_single_for_cpu,
	.sync_single_for_device = __swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = __swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = __swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(noncoherent_swiotlb_dma_ops);

struct dma_map_ops coherent_swiotlb_dma_ops = {
	.alloc = __dma_alloc_coherent,
	.free = __dma_free_coherent,
	.mmap = __swiotlb_mmap_coherent,
	.map_page = swiotlb_map_page,
	.unmap_page = swiotlb_unmap_page,
	.map_sg = swiotlb_map_sg_attrs,
	.unmap_sg = swiotlb_unmap_sg_attrs,
	.sync_single_for_cpu = swiotlb_sync_single_for_cpu,
	.sync_single_for_device = swiotlb_sync_single_for_device,
	.sync_sg_for_cpu = swiotlb_sync_sg_for_cpu,
	.sync_sg_for_device = swiotlb_sync_sg_for_device,
	.dma_supported = swiotlb_dma_supported,
	.mapping_error = swiotlb_dma_mapping_error,
};
EXPORT_SYMBOL(coherent_swiotlb_dma_ops);

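/*
 * Size the swiotlb bounce buffer at the smaller of 64MB and the largest
 * allocation the page allocator can satisfy (MAX_ORDER_NR_PAGES). The
 * non-coherent ops are the safe default; a fully coherent system can
 * point dma_ops at coherent_swiotlb_dma_ops instead.
 */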
extern int swiotlb_late_init_with_default_size(size_t default_size);

static int __init swiotlb_late_init(void)
{
	size_t swiotlb_size = min(SZ_64M, MAX_ORDER_NR_PAGES << PAGE_SHIFT);

	dma_ops = &noncoherent_swiotlb_dma_ops;

	return swiotlb_late_init_with_default_size(swiotlb_size);
}
arch_initcall(swiotlb_late_init);

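/*
 * Preallocate dma-debug tracking entries; fs_initcall runs after the
 * arch_initcall above, so debugging is armed before most drivers start
 * mapping buffers.
 */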
#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);