/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#ifdef CONFIG_DMA_MAYBE_COHERENT
int coherentio = 0;     /* User defined DMA coherency from command line. */
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;  /* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
        coherentio = 1;
        pr_info("Hardware DMA cache coherency (command line)\n");
        return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
        coherentio = 0;
        pr_info("Software DMA cache coherency (command line)\n");
        return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

static inline struct page *dma_addr_to_page(struct device *dev,
        dma_addr_t dma_addr)
{
        return pfn_to_page(
                plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems, the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
        return !plat_device_is_coherent(dev) &&
               (boot_cpu_type() == CPU_R10000 ||
                boot_cpu_type() == CPU_R12000 ||
                boot_cpu_type() == CPU_BMIPS5000);
}

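/*
 * Pick a GFP zone to match the device's coherent DMA mask: legacy ISA
 * devices without a struct device get ZONE_DMA, devices that cannot
 * address all of memory get ZONE_DMA or ZONE_DMA32 depending on which
 * zones the kernel was configured with, and fully 64-bit capable
 * devices allocate from the normal zone.
 */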
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        gfp_t dma_flag;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
        if (dev == NULL)
                dma_flag = __GFP_DMA;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
        if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
                dma_flag = __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
        if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
        if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA;
        else
#endif
                dma_flag = 0;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp | dma_flag;
}

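/*
 * "Noncoherent" allocations hand back a cached kernel mapping; callers
 * are responsible for their own cache maintenance, e.g. with
 * dma_cache_sync() below.
 */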
void *dma_alloc_noncoherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp)
{
        void *ret;

        gfp = massage_gfp_flags(dev, gfp);

        ret = (void *) __get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = plat_map_dma_mem(dev, ret, size);
        }

        return ret;
}
EXPORT_SYMBOL(dma_alloc_noncoherent);

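/*
 * Coherent allocations try, in order: a per-device coherent region,
 * CMA (when the allocation may block), then the page allocator.  On
 * non-I/O-coherent hardware the buffer is flushed from the caches and,
 * unless the hardware maintains coherency itself, the returned pointer
 * is switched to the uncached alias via UNCAC_ADDR() so that CPU and
 * device see the same data.
 */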
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
{
        void *ret;
        struct page *page = NULL;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &ret))
                return ret;

        gfp = massage_gfp_flags(dev, gfp);

        if (IS_ENABLED(CONFIG_DMA_CMA) && !(gfp & GFP_ATOMIC))
                page = dma_alloc_from_contiguous(dev,
                                count, get_order(size));
        if (!page)
                page = alloc_pages(gfp, get_order(size));

        if (!page)
                return NULL;

        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = plat_map_dma_mem(dev, ret, size);
        if (!plat_device_is_coherent(dev)) {
                dma_cache_wback_inv((unsigned long) ret, size);
                if (!hw_coherentio)
                        ret = UNCAC_ADDR(ret);
        }

        return ret;
}

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle)
{
        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
        free_pages((unsigned long) vaddr, get_order(size));
}
EXPORT_SYMBOL(dma_free_noncoherent);

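/*
 * Mirror of mips_dma_alloc_coherent(): an uncached alias is converted
 * back with CAC_ADDR() before the page lookup, and pages go back to
 * CMA when they came from there, otherwise to the page allocator.
 */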
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle, struct dma_attrs *attrs)
{
        unsigned long addr = (unsigned long) vaddr;
        int order = get_order(size);
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        if (dma_release_from_coherent(dev, order, vaddr))
                return;

        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

        if (!plat_device_is_coherent(dev) && !hw_coherentio)
                addr = CAC_ADDR(addr);

        page = virt_to_page((void *) addr);

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}

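/*
 * Map a coherent buffer into user space.  The pfn is computed from the
 * cached alias, which is what virt_to_page() understands.
 */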
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        struct dma_attrs *attrs)
{
        unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)cpu_addr;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;
        int ret = -ENXIO;

        if (!plat_device_is_coherent(dev) && !hw_coherentio)
                addr = CAC_ADDR(addr);

        pfn = page_to_pfn(virt_to_page((void *)addr));

        if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < count && user_count <= (count - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }

        return ret;
}

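/*
 * Cache maintenance for one virtually contiguous range: write back
 * before the device reads (DMA_TO_DEVICE), invalidate before the CPU
 * reads what the device wrote (DMA_FROM_DEVICE), and both for
 * bidirectional buffers.
 */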
static inline void __dma_sync_virtual(void *addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback((unsigned long)addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv((unsigned long)addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv((unsigned long)addr, size);
                break;

        default:
                BUG();
        }
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages.  But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        size_t left = size;

        do {
                size_t len = left;

                if (PageHighMem(page)) {
                        void *addr;

                        if (offset + len > PAGE_SIZE) {
                                if (offset >= PAGE_SIZE) {
                                        page += offset >> PAGE_SHIFT;
                                        offset &= ~PAGE_MASK;
                                }
                                len = PAGE_SIZE - offset;
                        }

                        addr = kmap_atomic(page);
                        __dma_sync_virtual(addr + offset, len, direction);
                        kunmap_atomic(addr);
                } else
                        /*
                         * Lowmem pages are contiguous in the kernel's
                         * linear mapping, so they are handled in a
                         * single call; len == left here, ending the loop.
                         */
                        __dma_sync_virtual(page_address(page) + offset,
                                           size, direction);
                offset = 0;
                page++;
                left -= len;
        } while (left);
}

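/*
 * On the CPUs caught by cpu_needs_post_dma_flush() the cache may have
 * been refilled speculatively while the DMA was in flight, so the
 * buffer has to be flushed again before the CPU looks at it.
 */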
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
        size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
{
        if (cpu_needs_post_dma_flush(dev))
                __dma_sync(dma_addr_to_page(dev, dma_addr),
                           dma_addr & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
        plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
        int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                if (!plat_device_is_coherent(dev))
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
                sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
                                  sg->offset;
        }

        return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
        struct dma_attrs *attrs)
{
        if (!plat_device_is_coherent(dev))
                __dma_sync(page, offset, size, direction);

        return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
        int nhwentries, enum dma_data_direction direction,
        struct dma_attrs *attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nhwentries, i) {
                if (!plat_device_is_coherent(dev) &&
                    direction != DMA_TO_DEVICE)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
}

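/*
 * Note the asymmetry in the sync hooks below: syncing back to the CPU
 * is only needed on the speculating CPUs caught by
 * cpu_needs_post_dma_flush(), while syncing towards the device is
 * needed on every non-I/O-coherent platform.
 */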
static void mips_dma_sync_single_for_cpu(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (cpu_needs_post_dma_flush(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (!plat_device_is_coherent(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (cpu_needs_post_dma_flush(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
        plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (!plat_device_is_coherent(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
}

/* plat_map_dma_mem() cannot fail, so there is never an error to report. */
int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                __dma_sync_virtual(vaddr, size, direction);
}

EXPORT_SYMBOL(dma_cache_sync);

static struct dma_map_ops mips_default_dma_map_ops = {
        .alloc = mips_dma_alloc_coherent,
        .free = mips_dma_free_coherent,
        .mmap = mips_dma_mmap,
        .map_page = mips_dma_map_page,
        .unmap_page = mips_dma_unmap_page,
        .map_sg = mips_dma_map_sg,
        .unmap_sg = mips_dma_unmap_sg,
        .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
        .sync_single_for_device = mips_dma_sync_single_for_device,
        .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
        .sync_sg_for_device = mips_dma_sync_sg_for_device,
        .mapping_error = mips_dma_mapping_error,
        .dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

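/*
 * Platforms with special coherency requirements can repoint
 * mips_dma_map_ops at their own table.  A hypothetical sketch (the
 * names below are illustrative, not taken from this file):
 *
 *      static struct dma_map_ops my_platform_dma_map_ops = {
 *              .alloc = my_platform_alloc_coherent,
 *              .map_page = my_platform_map_page,
 *              (remaining hooks as in mips_default_dma_map_ops)
 *      };
 *      mips_dma_map_ops = &my_platform_dma_map_ops;
 */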
#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(mips_dma_init);