#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
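
/*
 * Example (illustrative sketch only, not part of this interface): bus or
 * platform code may install a device-specific set of DMA operations with
 * set_dma_ops() before any mapping is made, and a driver negotiates its
 * addressing capability with dma_set_mask().  "my_iommu_ops", "my_bus_setup"
 * and "my_probe" below are hypothetical names.
 *
 *	static struct dma_map_ops my_iommu_ops = { ... };
 *
 *	static void my_bus_setup(struct device *dev)
 *	{
 *		set_dma_ops(dev, &my_iommu_ops);
 *	}
 *
 *	static int my_probe(struct platform_device *pdev)
 *	{
 *		if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *			return -EIO;
 *		...
 *	}
 */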

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}
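
/*
 * Example (illustrative sketch only): streaming mappings must be checked
 * with dma_mapping_error() before the address is handed to hardware.
 * "dev", "buf" and "len" are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */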

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/*
 * dma_coherent_pre_ops - barrier functions for coherent memory before DMA.
 * A barrier is required to ensure memory operations are complete before the
 * initiation of a DMA xfer.
 * If the coherent memory is Strongly Ordered
 *	- pre ARMv7 and 8x50 guarantees ordering wrt other mem accesses
 *	- ARMv7 guarantees ordering only within a 1KB block, so we need a barrier
 * If coherent memory is normal then we need a barrier to prevent
 * reordering
 */
static inline void dma_coherent_pre_ops(void)
{
#if COHERENT_IS_NORMAL == 1
	dmb();
#else
	if (arch_is_coherent())
		dmb();
	else
		barrier();
#endif
}
/*
 * dma_coherent_post_ops - barrier functions for coherent memory after DMA.
 * If the coherent memory is Strongly Ordered we don't need a barrier since
 * there are no speculative fetches to Strongly Ordered memory.
 * If coherent memory is normal then we need a barrier to prevent reordering
 */
static inline void dma_coherent_post_ops(void)
{
#if COHERENT_IS_NORMAL == 1
	dmb();
#else
	if (arch_is_coherent())
		dmb();
	else
		barrier();
#endif
}
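
/*
 * Example (illustrative sketch only): a typical sequence when a descriptor
 * in coherent memory is shared with a DMA engine.  "desc", "desc_dma" and
 * the DMA_START register are hypothetical.
 *
 *	desc->addr = buf_dma;
 *	desc->len  = buf_len;
 *	dma_coherent_pre_ops();			(descriptor visible before the kick)
 *	writel(desc_dma, base + DMA_START);
 *	...
 *	(after the completion interrupt)
 *	dma_coherent_post_ops();		(order reads of the DMA'd result)
 *	status = desc->status;
 */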

extern int dma_supported(struct device *dev, u64 mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA.  This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;
	BUG_ON(!ops);

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
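
/*
 * Example (illustrative sketch only): allocating and releasing a coherent
 * descriptor ring.  "dev", "ring", "ring_dma" and RING_SIZE are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */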

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
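
/*
 * Example (illustrative sketch only): exporting a coherent buffer to user
 * space from a driver's mmap() file operation.  "struct my_driver" and the
 * cpu_addr/dma_addr/size fields saved at allocation time are hypothetical.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_driver *drv = file->private_data;
 *
 *		return dma_mmap_coherent(drv->dev, vma, drv->cpu_addr,
 *					 drv->dma_addr, drv->size);
 *	}
 */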

static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
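
/*
 * Example (illustrative sketch only): write-combined allocations suit
 * CPU-write-mostly buffers such as frame buffers.  "dev", "fb_dma" and
 * FB_SIZE are hypothetical.
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(dev, FB_SIZE, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, FB_SIZE, fb, fb_dma);
 */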

static inline void *dma_alloc_stronglyordered(struct device *dev, size_t size,
					      dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_stronglyordered(struct device *dev, size_t size,
					    void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mmap_stronglyordered(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
					    dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_nonconsistent(struct device *dev, size_t size,
					  void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mmap_nonconsistent(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB. It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);
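
/*
 * Example (illustrative sketch only): a board with large multimedia buffers
 * might enlarge the consistent region from its machine init_early hook,
 * before any core_initcall runs.  "my_init_early" and the 14MB figure are
 * hypothetical.
 *
 *	static void __init my_init_early(void)
 *	{
 *		init_consistent_dma_size(14 << 20);
 *	}
 */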

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
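
/*
 * Example (illustrative sketch only): platform code registering a device
 * whose DMA engine can only reach the first 64MB of RAM.  "my_needs_bounce",
 * "my_dev" and the window limit are hypothetical; pool sizes are in bytes.
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > (PHYS_OFFSET + SZ_64M);
 *	}
 *
 *	static int __init my_platform_init(void)
 *	{
 *		return dmabounce_register_dev(&my_dev, 2048, 4096,
 *					      my_needs_bounce);
 *	}
 */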

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);


/**
 * dma_cache_pre_ops - clean or invalidate the cache before a DMA transfer
 * is initiated, and perform a barrier operation.
 * @virtual_addr: A kernel logical or kernel virtual address
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 */
static inline void dma_cache_pre_ops(void *virtual_addr,
			size_t size, enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(virtual_addr, size, dir);
}

/**
 * dma_cache_post_ops - clean or invalidate the cache after a DMA transfer
 * has completed, and perform a barrier operation.
 * @virtual_addr: A kernel logical or kernel virtual address
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 */
static inline void dma_cache_post_ops(void *virtual_addr,
			size_t size, enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	BUG_ON(!valid_dma_direction(dir));

	if (arch_has_speculative_dfetch() && !arch_is_coherent()
	    && dir != DMA_TO_DEVICE)
		/*
		 * Treat DMA_BIDIRECTIONAL and DMA_FROM_DEVICE
		 * identically: invalidate
		 */
		___dma_single_cpu_to_dev(virtual_addr,
			size, DMA_FROM_DEVICE);
}
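
/*
 * Example (illustrative sketch only): maintaining the cache by hand around
 * a device-to-memory transfer when the generic streaming API is not used.
 * "buf", "len", "buf_dma" and the DMA_DST register are hypothetical.
 *
 *	dma_cache_pre_ops(buf, len, DMA_FROM_DEVICE);
 *	writel(buf_dma, base + DMA_DST);
 *	...
 *	(after the transfer completes)
 *	dma_cache_post_ops(buf, len, DMA_FROM_DEVICE);
 */
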
/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
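
/*
 * Example (illustrative sketch only): these routines back the scatterlist
 * entries of arm_dma_ops; drivers normally reach them through the generic
 * dma_map_sg()/dma_unmap_sg() wrappers.  "dev", "sglist" and "nents" are
 * hypothetical.
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */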

#endif /* __KERNEL__ */
#endif