/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000 Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001, 06 Ralf Baechle <ralf@linux-mips.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */

#include <linux/types.h>
#include <linux/dma-mapping.h>
#include <linux/mm.h>
#include <linux/export.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <linux/gfp.h>
#include <linux/highmem.h>
#include <linux/dma-contiguous.h>

#include <asm/cache.h>
#include <asm/cpu-type.h>
#include <asm/io.h>

#include <dma-coherence.h>

#if defined(CONFIG_DMA_MAYBE_COHERENT) && !defined(CONFIG_DMA_PERDEV_COHERENT)
/* User-defined DMA coherency from command line. */
enum coherent_io_user_state coherentio = IO_COHERENCE_DEFAULT;
EXPORT_SYMBOL_GPL(coherentio);
int hw_coherentio = 0;  /* Actual hardware-supported DMA coherency setting. */

static int __init setcoherentio(char *str)
{
        coherentio = IO_COHERENCE_ENABLED;
        pr_info("Hardware DMA cache coherency (command line)\n");
        return 0;
}
early_param("coherentio", setcoherentio);

static int __init setnocoherentio(char *str)
{
        coherentio = IO_COHERENCE_DISABLED;
        pr_info("Software DMA cache coherency (command line)\n");
        return 0;
}
early_param("nocoherentio", setnocoherentio);
#endif
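
/*
 * Usage note (illustrative, not part of the original file): these are bare
 * kernel command-line flags parsed by early_param(), so the coherency mode
 * can be forced either way at boot, e.g.
 *
 *      ... console=ttyS0,115200 nocoherentio ...
 *
 * selects software-maintained coherency even where the hardware could
 * provide it, while "coherentio" forces the opposite.
 */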

static inline struct page *dma_addr_to_page(struct device *dev,
        dma_addr_t dma_addr)
{
        return pfn_to_page(
                plat_dma_addr_to_phys(dev, dma_addr) >> PAGE_SHIFT);
}

/*
 * The affected CPUs below in 'cpu_needs_post_dma_flush()' can
 * speculatively fill random cachelines with stale data at any time,
 * requiring an extra flush post-DMA.
 *
 * A warning on terminology: Linux calls an uncached area coherent, while
 * MIPS terminology calls memory areas with hardware-maintained coherency
 * coherent.
 *
 * Note that the R14000 and R16000 should also be checked for in this
 * condition. However this function is only called on non-I/O-coherent
 * systems and only the R10000 and R12000 are used in such systems, the
 * SGI IP28 Indigo² and the SGI IP32 aka O2 respectively.
 */
static inline bool cpu_needs_post_dma_flush(struct device *dev)
{
        if (plat_device_is_coherent(dev))
                return false;

        switch (boot_cpu_type()) {
        case CPU_R10000:
        case CPU_R12000:
        case CPU_BMIPS5000:
                return true;

        default:
                /*
                 * Presence of MAARs suggests that the CPU supports
                 * speculatively prefetching data, and therefore requires
                 * the post-DMA flush/invalidate.
                 */
                return cpu_has_maar;
        }
}

static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
{
        gfp_t dma_flag;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

#ifdef CONFIG_ISA
        if (dev == NULL)
                dma_flag = __GFP_DMA;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
        if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
                dma_flag = __GFP_DMA;
        else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
        if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
                dma_flag = __GFP_DMA32;
        else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
        if (dev == NULL ||
            dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
                dma_flag = __GFP_DMA;
        else
#endif
                dma_flag = 0;

        /* Don't invoke OOM killer */
        gfp |= __GFP_NORETRY;

        return gfp | dma_flag;
}
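
/*
 * Worked example (illustrative): on a kernel with both ZONE_DMA and
 * ZONE_DMA32, a device whose coherent_dma_mask is DMA_BIT_MASK(32) fails
 * the "< DMA_BIT_MASK(32)" test but passes "< DMA_BIT_MASK(64)", so its
 * allocations are steered to ZONE_DMA32 via __GFP_DMA32; a fully
 * 64-bit-capable device falls through to dma_flag = 0 and may be served
 * from any zone. __GFP_NORETRY is always set, so a failed allocation
 * returns NULL instead of invoking the OOM killer.
 */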

static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
        dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
        void *ret;
        struct page *page = NULL;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;

        gfp = massage_gfp_flags(dev, gfp);

        if (IS_ENABLED(CONFIG_DMA_CMA) && gfpflags_allow_blocking(gfp))
                page = dma_alloc_from_contiguous(dev, count, get_order(size),
                                                 gfp);
        if (!page)
                page = alloc_pages(gfp, get_order(size));

        if (!page)
                return NULL;

        ret = page_address(page);
        memset(ret, 0, size);
        *dma_handle = plat_map_dma_mem(dev, ret, size);
        if (!(attrs & DMA_ATTR_NON_CONSISTENT) &&
            !plat_device_is_coherent(dev)) {
                dma_cache_wback_inv((unsigned long) ret, size);
                ret = UNCAC_ADDR(ret);
        }

        return ret;
}

static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
        dma_addr_t dma_handle, unsigned long attrs)
{
        unsigned long addr = (unsigned long) vaddr;
        unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        struct page *page = NULL;

        plat_unmap_dma_mem(dev, dma_handle, size, DMA_BIDIRECTIONAL);

        if (!(attrs & DMA_ATTR_NON_CONSISTENT) && !plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        page = virt_to_page((void *) addr);

        if (!dma_release_from_contiguous(dev, page, count))
                __free_pages(page, get_order(size));
}
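
/*
 * Driver-side sketch (illustrative; "mydev" is a hypothetical device):
 * drivers reach the two functions above through the generic DMA API,
 * which dispatches here via mips_dma_map_ops:
 *
 *      dma_addr_t bus;
 *      void *cpu = dma_alloc_coherent(&mydev->dev, PAGE_SIZE, &bus,
 *                                     GFP_KERNEL);
 *      if (!cpu)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&mydev->dev, PAGE_SIZE, cpu, bus);
 *
 * On a non-coherent platform 'cpu' is an uncached (UNCAC_ADDR) alias of
 * the buffer, so CPU accesses bypass the caches entirely.
 */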

static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
        void *cpu_addr, dma_addr_t dma_addr, size_t size,
        unsigned long attrs)
{
        unsigned long user_count = vma_pages(vma);
        unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
        unsigned long addr = (unsigned long)cpu_addr;
        unsigned long off = vma->vm_pgoff;
        unsigned long pfn;
        int ret = -ENXIO;

        if (!plat_device_is_coherent(dev))
                addr = CAC_ADDR(addr);

        pfn = page_to_pfn(virt_to_page((void *)addr));

        if (attrs & DMA_ATTR_WRITE_COMBINE)
                vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        else
                vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);

        if (dma_mmap_from_dev_coherent(dev, vma, cpu_addr, size, &ret))
                return ret;

        if (off < count && user_count <= (count - off)) {
                ret = remap_pfn_range(vma, vma->vm_start,
                                      pfn + off,
                                      user_count << PAGE_SHIFT,
                                      vma->vm_page_prot);
        }

        return ret;
}
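
/*
 * Usage sketch (illustrative): userspace never calls this directly; a
 * driver's own mmap handler typically forwards to it through
 * dma_mmap_coherent(), e.g.
 *
 *      static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return dma_mmap_coherent(mydrv->dev, vma, mydrv->cpu_addr,
 *                                       mydrv->dma_handle, mydrv->size);
 *      }
 *
 * where mydrv and its fields are hypothetical driver state.
 */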

static inline void __dma_sync_virtual(void *addr, size_t size,
        enum dma_data_direction direction)
{
        switch (direction) {
        case DMA_TO_DEVICE:
                dma_cache_wback((unsigned long)addr, size);
                break;

        case DMA_FROM_DEVICE:
                dma_cache_inv((unsigned long)addr, size);
                break;

        case DMA_BIDIRECTIONAL:
                dma_cache_wback_inv((unsigned long)addr, size);
                break;

        default:
                BUG();
        }
}

/*
 * A single sg entry may refer to multiple physically contiguous
 * pages. But we still need to process highmem pages individually.
 * If highmem is not configured then the bulk of this loop gets
 * optimized out.
 */
static inline void __dma_sync(struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction)
{
        size_t left = size;

        do {
                size_t len = left;

                if (PageHighMem(page)) {
                        void *addr;

                        if (offset + len > PAGE_SIZE) {
                                if (offset >= PAGE_SIZE) {
                                        page += offset >> PAGE_SHIFT;
                                        offset &= ~PAGE_MASK;
                                }
                                len = PAGE_SIZE - offset;
                        }

                        addr = kmap_atomic(page);
                        __dma_sync_virtual(addr + offset, len, direction);
                        kunmap_atomic(addr);
                } else {
                        /*
                         * Lowmem pages are virtually contiguous, so the
                         * whole remainder can be synced in one go. Sync
                         * 'left' bytes rather than 'size': after earlier
                         * highmem iterations, syncing 'size' bytes would
                         * run past the end of the buffer.
                         */
                        __dma_sync_virtual(page_address(page) + offset,
                                           left, direction);
                }
                offset = 0;
                page++;
                left -= len;
        } while (left);
}

static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
        size_t size, enum dma_data_direction direction, unsigned long attrs)
{
        if (cpu_needs_post_dma_flush(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(dma_addr_to_page(dev, dma_addr),
                           dma_addr & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
        plat_unmap_dma_mem(dev, dma_addr, size, direction);
}

static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
        int nents, enum dma_data_direction direction, unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nents, i) {
                if (!plat_device_is_coherent(dev) &&
                    !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
#ifdef CONFIG_NEED_SG_DMA_LENGTH
                sg->dma_length = sg->length;
#endif
                sg->dma_address = plat_map_dma_mem_page(dev, sg_page(sg)) +
                                  sg->offset;
        }

        return nents;
}
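
/*
 * Usage sketch (illustrative; sgl/nents/sg/i are caller-declared): a block
 * or network driver maps a scatter-gather list through the generic API,
 * which lands in mips_dma_map_sg() above:
 *
 *      int n = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *      if (n == 0)
 *              return -EIO;
 *      for_each_sg(sgl, sg, n, i)
 *              program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *      ...
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * program_hw_descriptor() stands in for device-specific code.
 */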

static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
        unsigned long offset, size_t size, enum dma_data_direction direction,
        unsigned long attrs)
{
        if (!plat_device_is_coherent(dev) && !(attrs & DMA_ATTR_SKIP_CPU_SYNC))
                __dma_sync(page, offset, size, direction);

        return plat_map_dma_mem_page(dev, page) + offset;
}

static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
        int nhwentries, enum dma_data_direction direction,
        unsigned long attrs)
{
        int i;
        struct scatterlist *sg;

        for_each_sg(sglist, sg, nhwentries, i) {
                if (!plat_device_is_coherent(dev) &&
                    !(attrs & DMA_ATTR_SKIP_CPU_SYNC) &&
                    direction != DMA_TO_DEVICE)
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                plat_unmap_dma_mem(dev, sg->dma_address, sg->length, direction);
        }
}

static void mips_dma_sync_single_for_cpu(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (cpu_needs_post_dma_flush(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
        plat_post_dma_flush(dev);
}
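
/*
 * Usage sketch (illustrative): a driver that inspects a DMA buffer while
 * the mapping stays alive must bracket its CPU accesses:
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... CPU reads the buffer ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *
 * On the CPUs listed in cpu_needs_post_dma_flush() the for_cpu step
 * invalidates cachelines that may have been speculatively refilled while
 * the device owned the buffer.
 */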

static void mips_dma_sync_single_for_device(struct device *dev,
        dma_addr_t dma_handle, size_t size, enum dma_data_direction direction)
{
        if (!plat_device_is_coherent(dev))
                __dma_sync(dma_addr_to_page(dev, dma_handle),
                           dma_handle & ~PAGE_MASK, size, direction);
}

static void mips_dma_sync_sg_for_cpu(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (cpu_needs_post_dma_flush(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
        plat_post_dma_flush(dev);
}

static void mips_dma_sync_sg_for_device(struct device *dev,
        struct scatterlist *sglist, int nelems,
        enum dma_data_direction direction)
{
        int i;
        struct scatterlist *sg;

        if (!plat_device_is_coherent(dev)) {
                for_each_sg(sglist, sg, nelems, i) {
                        __dma_sync(sg_page(sg), sg->offset, sg->length,
                                   direction);
                }
        }
}

static int mips_dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return 0;
}

static int mips_dma_supported(struct device *dev, u64 mask)
{
        return plat_dma_supported(dev, mask);
}

static void mips_dma_cache_sync(struct device *dev, void *vaddr, size_t size,
        enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        if (!plat_device_is_coherent(dev))
                __dma_sync_virtual(vaddr, size, direction);
}

static const struct dma_map_ops mips_default_dma_map_ops = {
        .alloc = mips_dma_alloc_coherent,
        .free = mips_dma_free_coherent,
        .mmap = mips_dma_mmap,
        .map_page = mips_dma_map_page,
        .unmap_page = mips_dma_unmap_page,
        .map_sg = mips_dma_map_sg,
        .unmap_sg = mips_dma_unmap_sg,
        .sync_single_for_cpu = mips_dma_sync_single_for_cpu,
        .sync_single_for_device = mips_dma_sync_single_for_device,
        .sync_sg_for_cpu = mips_dma_sync_sg_for_cpu,
        .sync_sg_for_device = mips_dma_sync_sg_for_device,
        .mapping_error = mips_dma_mapping_error,
        .dma_supported = mips_dma_supported,
        .cache_sync = mips_dma_cache_sync,
};

const struct dma_map_ops *mips_dma_map_ops = &mips_default_dma_map_ops;
EXPORT_SYMBOL(mips_dma_map_ops);
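
/*
 * Indirection sketch (illustrative): mips_dma_map_ops is a pointer so
 * that platforms with unusual DMA windows can substitute their own ops
 * during early setup, e.g.
 *
 *      mips_dma_map_ops = &my_platform_dma_map_ops;    (hypothetical ops)
 *
 * Everything else keeps the default ops defined above.
 */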

#define PREALLOC_DMA_DEBUG_ENTRIES (1 << 16)

static int __init mips_dma_init(void)
{
        dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);

        return 0;
}
fs_initcall(mips_dma_init);