/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>

#include <asm/cache.h>
#include <asm/io.h>

#include <dma-coherence.h>

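/*
 * Map a DMA address back to its struct page, using the platform's
 * dma_addr_t -> physical address conversion.
 */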
static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

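/*
 * R10000 and R12000 cores can execute memory accesses speculatively,
 * which may pull lines back into the cache while a DMA transfer is in
 * flight, so on noncoherent systems these CPUs need cache maintenance
 * on the CPU side even after the transfer has completed.
 */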
static inline int cpu_is_noncoherent_r10000(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (current_cpu_type() == CPU_R10000 ||
		current_cpu_type() == CPU_R12000);
}

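/*
 * Pick the GFP zone flag matching the device's coherent DMA mask,
 * after stripping any zone specifiers the caller passed in.
 */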
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

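/*
 * Allocate cached memory for DMA; the caller is responsible for cache
 * maintenance (e.g. dma_cache_sync()) around device accesses.
 */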
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

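/*
 * Allocate "coherent" memory: on hardware-coherent platforms this is an
 * ordinary cached allocation; otherwise the buffer is written back and
 * invalidated and an uncached (UNCAC_ADDR) alias is returned instead.
 */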
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
	void *ret;

	if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
		return ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);

		if (!plat_device_is_coherent(dev)) {
			dma_cache_wback_inv((unsigned long) ret, size);
			ret = UNCAC_ADDR(ret);
		}
	}

	return ret;
}

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

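/*
 * Free a buffer from mips_dma_alloc_coherent(); on noncoherent devices
 * the uncached alias handed out at allocation time must be converted
 * back to its cached address before free_pages() sees it.
 */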
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, struct dma_attrs *attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	int order = get_order(size);

	if (dma_release_from_coherent(dev, order, vaddr))
		return;

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	free_pages(addr, order);
}

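/*
 * Perform the cache maintenance matching the DMA direction on a
 * kernel-virtual range: writeback for device reads, invalidate for
 * device writes, writeback+invalidate for bidirectional transfers.
 */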
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			/*
			 * Sync only the remaining length, not the full size;
			 * earlier iterations may already have covered part of
			 * the buffer.
			 */
			__dma_sync_virtual(page_address(page) + offset,
					   len, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

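/*
 * On noncoherent R10000/R12000 systems, speculation may have refilled
 * cache lines during the transfer, so sync the buffer again before
 * handing it back to the CPU.
 */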
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);

	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

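/*
 * Map each scatterlist entry: perform cache maintenance for noncoherent
 * devices, then record the bus address the device will use.
 */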
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sg,
	int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

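/*
 * Sync the page for the device (when not hardware-coherent) and return
 * the bus address for the given page/offset.
 */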
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

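/*
 * For DMA_FROM_DEVICE and DMA_BIDIRECTIONAL transfers the CPU must not
 * see stale cache lines, so sync each entry on noncoherent devices;
 * DMA_TO_DEVICE buffers need no further maintenance at unmap time.
 */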
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sg,
	int nhwentries, enum dma_data_direction direction,
	struct dma_attrs *attrs)
{
	int i;

	for (i = 0; i < nhwentries; i++, sg++) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

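/*
 * Only noncoherent R10000/R12000 systems need CPU-side maintenance
 * here; on other CPUs the maintenance done at map time still holds.
 */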
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_is_noncoherent_r10000(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

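/*
 * Before handing the buffer back to the device, give the platform a
 * hook for extra work and push dirty cache lines out on noncoherent
 * systems.
 */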
static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (cpu_is_noncoherent_r10000(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sg, int nelems, enum dma_data_direction direction)
{
	int i;

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
	}
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return plat_dma_mapping_error(dev, dma_addr);
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

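/*
 * Flush the caches for a buffer obtained from dma_alloc_noncoherent()
 * around device accesses; effectively a no-op when the device is
 * hardware-coherent.
 */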
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	plat_extra_sync_for_device(dev);
	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);

static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);