/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

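/*
 * Translate a DMA (bus) address back to the kernel virtual address of the
 * buffer it maps, using the platform's dma-to-physical conversion.
 */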
static inline unsigned long dma_addr_to_virt(dma_addr_t dma_addr)
{
	unsigned long addr = plat_dma_addr_to_phys(dma_addr);

	return (unsigned long)phys_to_virt(addr);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

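/*
 * R10000 and R12000 can speculatively fill cache lines at any time, so on
 * non-coherent systems these CPUs need cache maintenance after the device
 * has written to memory, not only before; the unmap and sync-for-cpu paths
 * below test for this case.
 */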
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
	       current_cpu_type() == CPU_R12000);
}

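/*
 * Strip any zone modifiers the caller passed in and pick the allocation
 * zone from the device's coherent DMA mask instead: __GFP_DMA below a
 * 24-bit mask, __GFP_DMA32 below a 32-bit mask, where those zones exist.
 */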
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ZONE_DMA
	if (dev == NULL)
		gfp |= __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(24))
		gfp |= __GFP_DMA;
	else
#endif
#ifdef CONFIG_ZONE_DMA32
	     if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		gfp |= __GFP_DMA32;
	else
#endif
		;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp;
}

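/*
 * Allocate zeroed pages and return their bus address.  The buffer stays
 * cached; users of the noncoherent API are responsible for cache
 * maintenance themselves, e.g. via dma_cache_sync().
 */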
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

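/*
 * Same allocation as above, but on non-coherent hardware the buffer is
 * flushed from the caches and the returned pointer is rewritten through
 * the uncached address window (UNCAC_ADDR) so CPU accesses bypass the
 * cache entirely.
 */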
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

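/*
 * Undo dma_alloc_coherent(): convert an uncached mapping back to its
 * cached counterpart (CAC_ADDR) where necessary so the pages can be
 * returned to the page allocator.
 */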
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

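/*
 * Cache maintenance for streaming mappings: write back for DMA_TO_DEVICE,
 * invalidate for DMA_FROM_DEVICE, and write back plus invalidate for
 * DMA_BIDIRECTIONAL.
 */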
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

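/*
 * Hand a kernel buffer to the device: on non-coherent hardware the
 * relevant cache lines are synchronised first, then the platform code
 * supplies the bus address the device should use.
 */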
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	if (!plat_device_is_coherent(dev))
		__dma_sync(addr, size, direction);

	return plat_map_dma_mem(dev, ptr, size);
}

EXPORT_SYMBOL(dma_map_single);

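/*
 * Tear down a single mapping.  Only the speculating R10000/R12000 need
 * another cache flush here; on other non-coherent CPUs the maintenance
 * done at map time is sufficient.
 */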
void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_virt(dma_addr), size,
			   direction);

	plat_unmap_dma_mem(dma_addr);
}

EXPORT_SYMBOL(dma_unmap_single);

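/*
 * Map a scatterlist: each entry gets the same treatment as a single
 * mapping, and the resulting bus address is stored in sg->dma_address.
 */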
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) sg_virt(sg);
		if (!plat_device_is_coherent(dev) && addr)
			__dma_sync(addr, sg->length, direction);
		sg->dma_address = plat_map_dma_mem(dev,
						   (void *)addr, sg->length);
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

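/*
 * Page-based variant of dma_map_single(): on non-coherent hardware the
 * page's cache lines are written back and invalidated before the page is
 * handed to the device.
 */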
dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = (unsigned long) page_address(page) + offset;
		dma_cache_wback_inv(addr, size);
	}

	return plat_map_dma_mem_page(dev, page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev) && direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = plat_dma_addr_to_phys(dma_address);
		dma_cache_wback_inv(addr, size);
	}

	plat_unmap_dma_mem(dma_address);
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE) {
			addr = (unsigned long) sg_virt(sg);
			if (addr)
				__dma_sync(addr, sg->length, direction);
		}
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

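/*
 * Ownership-transfer helpers: sync_*_for_cpu makes device-written data
 * visible to the CPU (only needed on the speculating R10000/R12000),
 * while sync_*_for_device flushes CPU-side data before the device
 * touches the buffer again (needed on every non-coherent platform).
 */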
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (cpu_is_noncoherent_r10000(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev)) {
		unsigned long addr;

		addr = dma_addr_to_virt(dma_handle);
		__dma_sync(addr + offset, size, direction);
	}
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync((unsigned long)page_address(sg_page(sg)),
				   sg->length, direction);
		plat_unmap_dma_mem(sg->dma_address);
	}
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

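/* Mappings created above cannot fail, so there is never an error to report. */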
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < DMA_BIT_MASK(24))
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(struct device *dev, dma_addr_t dma_addr)
{
	return plat_device_is_coherent(dev);
}

EXPORT_SYMBOL(dma_is_consistent);

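/*
 * Explicit cache maintenance for memory from dma_alloc_noncoherent(); a
 * full write-back and invalidate is performed regardless of direction.
 */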
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);