#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses.  They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma

#if !defined(CONFIG_HIGHMEM)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}
#elif defined(__pfn_to_bus)
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}
#else
#error "this machine class needs to define __arch_page_to_dma to use HIGHMEM"
#endif

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
        return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
        return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
        return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, unbuffered memory for use with DMA devices.  This is the
 * "generic" version; the PCI-specific version lives in pci.h.
 *
 * Note: drivers should NOT use these functions directly, since doing so
 * breaks platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support instead - see dma-mapping.h (dma_sync_*).
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);
extern void dma_cache_maint_page(struct page *page, unsigned long offset,
                size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24 bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform-specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
        if (mask < ISA_DMA_THRESHOLD)
                return 0;
        return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}

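/*
 * Usage sketch (illustrative only; foo_probe is a hypothetical driver
 * function, not part of this header): a device that can only drive the
 * low 24 address bits, as in the example above, would negotiate its
 * mask at probe time like this:
 *
 *	static int foo_probe(struct device *dev)
 *	{
 *		int err = dma_set_mask(dev, 0x00ffffff);
 *		if (err)
 *			return err;
 *		return 0;
 *	}
 */
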
static inline int dma_get_cache_alignment(void)
{
        return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
        return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        return dma_addr == ~0;
}

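/*
 * Usage sketch (illustrative only; buf and len are hypothetical, and on
 * this implementation a failure can only come from the dmabounce path):
 * a streaming mapping should be checked before the handle is handed to
 * the device:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */
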
/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
                dma_addr_t *handle, gfp_t gfp)
{
        return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);

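/*
 * Usage sketch (illustrative only; the ring names and SZ_4K size are
 * hypothetical): a driver typically allocates a descriptor ring once at
 * probe time and frees it at remove time:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	(tell the device about ring_dma, access the ring through *ring)
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */
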
/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal during and after this call.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);

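/*
 * Usage sketch (illustrative only; foo_mmap and the foo_dev fields are
 * hypothetical): a driver's mmap file operation can hand a previously
 * allocated coherent buffer to user space like this:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->handle, foo->size);
 *	}
 */
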
/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
                gfp_t);

#define dma_free_writecombine(dev, size, cpu_addr, handle) \
        dma_free_coherent(dev, size, cpu_addr, handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
                void *, dma_addr_t, size_t);

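/*
 * Usage sketch (illustrative only; fb, fb_dma and fb_size are
 * hypothetical): write-combined buffers suit CPU-written, device-read
 * data such as frame buffers, where streaming write throughput matters
 * more than CPU read performance:
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	(point the display controller at fb_dma, render through *fb)
 *	dma_free_writecombine(dev, fb_size, fb, fb_dma);
 */
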
#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB
 * (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
                unsigned long);

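/*
 * Usage sketch (illustrative only; the pool sizes are hypothetical):
 * platform code for a bounce-limited bus might register each device as
 * it is discovered:
 *
 *	err = dmabounce_register_dev(dev, 512, 4096);
 *	if (err)
 *		dev_err(dev, "failed to register with dmabounce\n");
 */
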
/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);

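/*
 * Implementation sketch (illustrative only; the SZ_64M bound is a
 * hypothetical stand-in for a platform's real inbound window, echoing
 * the IXP425 example above): a platform with a limited PCI inbound
 * window could implement the hook as a simple range check:
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 */
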
/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
                enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
                unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
                enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
                size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        return 1;
}

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
                size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint(cpu_addr, size, dir);

        return virt_to_dma(dev, cpu_addr);
}

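/*
 * Usage sketch (illustrative only; buf and len are hypothetical): the
 * streaming API brackets each transfer with a map/unmap pair, and the
 * CPU must not touch the buffer while the device owns it:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	(start the transfer, wait for completion)
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
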
/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                unsigned long offset, size_t size, enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!arch_is_coherent())
                dma_cache_maint_page(page, offset, size, dir);

        return page_to_dma(dev, page) + offset;
}

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        /* nothing to do */
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
                size_t size, enum dma_data_direction dir)
{
        dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu - sync a region of a mapping for CPU access
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so.  At the next point you
 * give the DMA address back to the card, you must first perform a
 * dma_sync_single_range_for_device(), and then the device again owns
 * the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        dmabounce_sync_for_cpu(dev, handle, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t handle, unsigned long offset, size_t size,
                enum dma_data_direction dir)
{
        BUG_ON(!valid_dma_direction(dir));

        if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
                return;

        if (!arch_is_coherent())
                dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
        dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}

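/*
 * Usage sketch (illustrative only; desc_len is hypothetical): a driver
 * that inspects a region the device writes into a mapped buffer must
 * sync for the CPU before each read, and sync back before handing the
 * region to the device again:
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, desc_len,
 *				      DMA_FROM_DEVICE);
 *	(inspect the buffer contents from the CPU)
 *	dma_sync_single_range_for_device(dev, handle, 0, desc_len,
 *					 DMA_FROM_DEVICE);
 */
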
/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
                enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
                enum dma_data_direction);

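/*
 * Usage sketch (illustrative only; sglist, nents and
 * program_hw_descriptor are hypothetical, while for_each_sg comes from
 * linux/scatterlist.h, included above): the scatterlist variants map a
 * whole list in one call, and the returned count - which may be smaller
 * than nents - bounds the programming loop:
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	struct scatterlist *sg;
 *
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw_descriptor(sg_dma_address(sg), sg_dma_len(sg));
 *	(run the transfer)
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */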

#endif /* __KERNEL__ */
#endif /* ASMARM_DMA_MAPPING_H */