/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06  Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio;	/* Actual hardware supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
	coherentio = IO_COHERENCE_ENABLED;
	pr_info("Hardware DMA cache coherency (command line)\n");
	return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
	coherentio = IO_COHERENCE_DISABLED;
	pr_info("Software DMA cache coherency (command line)\n");
	return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif

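/*
 * Translate a DMA address back to its struct page.  The reverse mapping
 * goes through the platform's plat_dma_addr_to_phys() hook, since the
 * dma_addr_t <-> physical address relationship is platform specific.
 */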
static inline struct page *dma_addr_to_page(struct device *dev,
	dma_addr_t dma_addr)
{
	return pfn_to_page(
		plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition.  However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems: the
 * SGI IP28 Indigo² and the SGI IP32 aka O2, respectively.
 */
static inline int cpu_needs_post_dma_flush(struct device *dev)
{
	return !plat_device_is_coherent(dev) &&
	       (boot_cpu_type() == CPU_R10000 ||
		boot_cpu_type() == CPU_R12000 ||
		boot_cpu_type() == CPU_BMIPS5000);
}

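/*
 * Strip any caller-supplied zone modifiers, then pick the GFP zone
 * matching the device's coherent DMA mask: devices that cannot address
 * all of memory get __GFP_DMA or __GFP_DMA32 allocations, depending on
 * which zones the kernel was configured with.  __GFP_NORETRY keeps
 * these allocations from triggering the OOM killer.
 */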
static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
	gfp_t dma_flag;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
	if (dev == NULL)
		dma_flag = __GFP_DMA;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
	if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
		dma_flag = __GFP_DMA;
	else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
	if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
		dma_flag = __GFP_DMA32;
	else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
	if (dev == NULL ||
	    dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
		dma_flag = __GFP_DMA;
	else
#endif
		dma_flag = 0;

	/* Don't invoke OOM killer */
	gfp |= __GFP_NORETRY;

	return gfp | dma_flag;
}

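/*
 * Non-consistent allocation: hand back ordinary cached kernel memory.
 * Callers of this path are expected to maintain coherency themselves,
 * e.g. via dma_cache_sync() below.
 */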
static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;

	gfp = massage_gfp_flags(dev, gfp);

	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = plat_map_dma_mem(dev, ret, size);
	}

	return ret;
}

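/*
 * Coherent allocation: try CMA first (only when the caller may block),
 * then fall back to the page allocator.  On non-coherent platforms the
 * buffer is written back and invalidated, and an uncached alias of it
 * (UNCAC_ADDR) is what the caller gets back.
 */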
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	void *ret;
	struct page *page = NULL;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	/*
	 * XXX: seems like the coherent and non-coherent implementations could
	 * be consolidated.
	 */
	if (attrs & DMA_ATTR_NON_CONSISTENT)
		return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);

	gfp = massage_gfp_flags(dev, gfp);

	if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
		page = dma_alloc_from_contiguous(dev, count, get_order(size));
	if (!page)
		page = alloc_pages(gfp, get_order(size));

	if (!page)
		return NULL;

	ret = page_address(page);
	memset(ret, 0, size);
	*dma_handle = plat_map_dma_mem(dev, ret, size);
	if (!plat_device_is_coherent(dev)) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

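/* Undo mips_dma_alloc_noncoherent(): unmap, then free the pages. */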
static void mips_dma_free_noncoherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle)
{
	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);
	free_pages((unsigned long) vaddr, get_order(size));
}

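/*
 * Undo mips_dma_alloc_coherent().  The uncached alias must be converted
 * back with CAC_ADDR() before the underlying page can be looked up and
 * released to CMA or the page allocator.
 */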
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle, unsigned long attrs)
{
	unsigned long addr = (unsigned long) vaddr;
	unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	struct page *page = NULL;

	if (attrs & DMA_ATTR_NON_CONSISTENT) {
		mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
		return;
	}

	plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	page = virt_to_page((void *) addr);

	if (!dma_release_from_contiguous(dev, page, count))
		__free_pages(page, get_order(size));
}

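/*
 * Map a coherent buffer into user space.  The user mapping is made
 * uncached (or write-combined when DMA_ATTR_WRITE_COMBINE is passed),
 * so user space sees the same coherency as the kernel-side alias.
 */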
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
	void *cpu_addr, dma_addr_t dma_addr, size_t size,
	unsigned long attrs)
{
	unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
	unsigned long addr = (unsigned long)cpu_addr;
	unsigned long off = vma->vm_pgoff;
	unsigned long pfn;
	int ret = -ENXIO;

	if (!plat_device_is_coherent(dev))
		addr = CAC_ADDR(addr);

	pfn = page_to_pfn(virt_to_page((void *)addr));

	if (attrs & DMA_ATTR_WRITE_COMBINE)
		vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	else
		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

	if (dma_mmap_from_coherent(dev, vma, cpu_addr, size, &ret))
		return ret;

	if (off < count && user_count <= (count - off)) {
		ret = remap_pfn_range(vma, vma->vm_start,
				      pfn + off,
				      user_count << PAGE_SHIFT,
				      vma->vm_page_prot);
	}

	return ret;
}

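/*
 * Cache maintenance for a virtually addressed buffer: writeback before
 * the device reads it (DMA_TO_DEVICE), invalidate before the CPU reads
 * device-written data (DMA_FROM_DEVICE), and both for DMA_BIDIRECTIONAL.
 */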
static inline void __dma_sync_virtual(void *addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback((unsigned long)addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv((unsigned long)addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv((unsigned long)addr, size);
		break;

	default:
		BUG();
	}
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages.  But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	size_t left = size;

	do {
		size_t len = left;

		if (PageHighMem(page)) {
			void *addr;

			if (offset + len > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset >> PAGE_SHIFT;
					offset &= ~PAGE_MASK;
				}
				len = PAGE_SIZE - offset;
			}

			addr = kmap_atomic(page);
			__dma_sync_virtual(addr + offset, len, direction);
			kunmap_atomic(addr);
		} else
			__dma_sync_virtual(page_address(page) + offset,
					   size, direction);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

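/*
 * On unmap, a flush is only needed on CPUs that may have speculatively
 * pulled stale lines back into the cache during the DMA (see the
 * comment above cpu_needs_post_dma_flush()).
 */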
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
	size_t size, enum dma_data_direction direction, unsigned long attrs)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_addr),
			   dma_addr & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
	plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

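/*
 * Map each scatterlist entry: flush it to memory on non-coherent
 * systems, then record the bus address obtained from the platform hook.
 */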
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
	int nents, enum dma_data_direction direction, unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nents, i) {
		if (!plat_device_is_coherent(dev))
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
		sg->dma_length = sg->length;
#endif
		sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
				  sg->offset;
	}

	return nents;
}

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction,
	unsigned long attrs)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(page, offset, size, direction);

	return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
	int nhwentries, enum dma_data_direction direction,
	unsigned long attrs)
{
	int i;
	struct scatterlist *sg;

	for_each_sg(sglist, sg, nhwentries, i) {
		if (!plat_device_is_coherent(dev) &&
		    direction != DMA_TO_DEVICE)
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
	}
}

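/*
 * The sync_*_for_cpu handlers only need the post-DMA flush on the
 * speculating CPUs checked by cpu_needs_post_dma_flush(); the
 * sync_*_for_device handlers must flush on every non-coherent platform
 * before the device touches memory.
 */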
static void mips_dma_sync_single_for_cpu(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (cpu_needs_post_dma_flush(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_single_for_device(struct device *dev,
	dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
	if (!plat_device_is_coherent(dev))
		__dma_sync(dma_addr_to_page(dev, dma_handle),
			   dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (cpu_needs_post_dma_flush(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
	plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
	struct scatterlist *sglist, int nelems,
	enum dma_data_direction direction)
{
	int i;
	struct scatterlist *sg;

	if (!plat_device_is_coherent(dev)) {
		for_each_sg(sglist, sg, nelems, i) {
			__dma_sync(sg_page(sg), sg->offset, sg->length,
				   direction);
		}
	}
}

int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return 0;
}

int mips_dma_supported(struct device *dev, u64 mask)
{
	return plat_dma_supported(dev, mask);
}

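/*
 * Cache maintenance for memory obtained with DMA_ATTR_NON_CONSISTENT;
 * a typical caller flushes the buffer between CPU and device accesses,
 * e.g. dma_cache_sync(dev, vaddr, size, DMA_TO_DEVICE).
 */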
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (!plat_device_is_coherent(dev))
		__dma_sync_virtual(vaddr, size, direction);
}
EXPORT_SYMBOL(dma_cache_sync);

static struct dma_map_ops mips_default_dma_map_ops = {
	.alloc = mips_dma_alloc_coherent,
	.free = mips_dma_free_coherent,
	.mmap = mips_dma_mmap,
	.map_page = mips_dma_map_page,
	.unmap_page = mips_dma_unmap_page,
	.map_sg = mips_dma_map_sg,
	.unmap_sg = mips_dma_unmap_sg,
	.sync_single_for_cpu = mips_dma_sync_single_for_cpu,
	.sync_single_for_device = mips_dma_sync_single_for_device,
	.sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
	.sync_sg_for_device = mips_dma_sync_sg_for_device,
	.mapping_error = mips_dma_mapping_error,
	.dma_supported = mips_dma_supported
};

struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

	return 0;
}
fs_initcall(mips_dma_init);