/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

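/*
 * Translate a DMA/bus address handed out by this code back to the kernel
 * virtual address of the underlying buffer, via the platform's
 * plat_dma_addr_to_phys() hook and phys_to_virt().  Only meaningful for
 * lowmem buffers mapped by this file.
 */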
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

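/*
 * The R10000 and R12000 can execute loads speculatively, which may pull
 * lines back into the cache while a DMA transfer is still in flight.  On
 * non-coherent platforms those CPUs therefore need the caches cleaned
 * again when buffer ownership returns to the CPU; the helper below detects
 * that case in the unmap and sync-for-cpu paths.
 */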
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_data.cputype == CPU_R10000 ||
		current_cpu_data.cputype == CPU_R12000);
}

void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

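/*
 * Illustrative sketch (not part of the original file): typical driver use
 * of the coherent allocator, with "dev", "desc" and the PAGE_SIZE length
 * being placeholder values:
 *
 *	dma_addr_t desc_dma;
 *	void *desc;
 *
 *	desc = dma_alloc_coherent(dev, PAGE_SIZE, &desc_dma, GFP_KERNEL);
 *	if (!desc)
 *		return -ENOMEM;
 *	... tell the device about desc_dma, use desc from the CPU ...
 *	dma_free_coherent(dev, PAGE_SIZE, desc, desc_dma);
 */
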
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

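/*
 * Streaming mapping of a single buffer.  On non-coherent platforms the
 * buffer is written back and/or invalidated according to the transfer
 * direction before the bus address is handed to the device.
 */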
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

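/*
 * Illustrative sketch (not part of the original file): a driver would
 * typically pair the two calls above as follows, with "dev", "buf" and
 * "len" being hypothetical driver-owned values:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... point the device at "handle" and let the transfer complete ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read buf ...
 */
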
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
		sg->dma_address = plat_map_dma_mem_page(dev, sg->page) +
				  sg->offset;
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

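/*
 * Like dma_map_single(), but takes a page and offset.  Note that on
 * non-coherent platforms this always performs a writeback-invalidate,
 * regardless of the transfer direction.
 */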
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) page_address(sg->page);
			if (addr)
				__dma_sync(addr + sg->offset, sg->length,
					   direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

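/*
 * The dma_sync_*_for_cpu() routines below give buffer ownership back to
 * the CPU after a transfer, while the *_for_device() variants hand it back
 * to the device before the next one.  The *_for_device() variants perform
 * cache maintenance on all non-coherent platforms; the *_for_cpu()
 * variants only need to act on CPUs that can speculatively refill the
 * cache (see cpu_is_noncoherent_r10000() above).
 */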
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg->page),
				   sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg->page),
				   sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * We fall back to GFP_DMA when the mask isn't all 1s, so we can't
	 * guarantee allocations that must be within a tighter range than
	 * GFP_DMA.
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);