#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__pfn_to_bus(page_to_pfn(page));
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return pfn_to_page(__bus_to_pfn(addr));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline struct page *dma_to_page(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_page(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * The DMA API is built upon the notion of "buffer ownership". A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device. These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches. We model our approach on the assumption that
 * the CPU does do speculative prefetches, which means we clean caches
 * before transfers and delay cache invalidation until transfer completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change. Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

/*
 * Return whether the given device DMA address mask can be supported
 * properly. For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	if (mask < ISA_DMA_THRESHOLD)
		return 0;
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
#ifdef CONFIG_DMABOUNCE
	if (dev->archdata.dmabounce) {
		if (dma_mask >= ISA_DMA_THRESHOLD)
			return 0;
		else
			return -EIO;
	}
#endif
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
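
/*
 * Example (a minimal sketch, not part of this file): a hypothetical
 * driver's probe routine restricting its device to 26-bit DMA
 * addressing.  The mask width is an illustrative assumption.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(26)))
 *		dev_warn(dev, "no suitable DMA addressing available\n");
 */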

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
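
/*
 * Example (illustrative sketch): checking a streaming mapping for
 * failure before handing the address to hardware.  "buf" and "len"
 * are assumed to come from the caller.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */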

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
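
/*
 * Example (a minimal sketch): allocating and freeing a coherent buffer,
 * e.g. for a descriptor ring.  The size is an illustrative assumption.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */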

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal both during and after this call executes.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
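
/*
 * Example (a minimal sketch): a hypothetical character device's mmap
 * handler exposing a previously allocated coherent buffer.  "foo_dev"
 * and its "ring", "ring_dma" and "ring_size" fields are assumed driver
 * state, not anything defined here.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->ring,
 *					 foo->ring_dma, foo->ring_size);
 *	}
 */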


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 *
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long);
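
/*
 * Example (illustrative sketch): platform code registering a device for
 * bouncing.  The pool sizes are assumptions; real platforms choose them
 * to match their typical small and large transfer sizes.
 *
 *	if (dmabounce_register_dev(dev, 512, 4096))
 *		dev_err(dev, "failed to register with dmabounce\n");
 */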

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a
 * device that was previously registered with dmabounce_register_dev
 * is removed from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 *
 */
#ifdef CONFIG_SA1111
extern int dma_needs_bounce(struct device*, dma_addr_t, size_t);
#else
static inline int dma_needs_bounce(struct device *dev, dma_addr_t addr,
				   size_t size)
{
	return 0;
}
#endif
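
/*
 * Example (a hypothetical sketch, not a real platform): an implementation
 * of dma_needs_bounce for a platform whose inbound DMA window only covers
 * the first 64MB of bus addresses.  The window base and size are
 * illustrative assumptions.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t addr, size_t size)
 *	{
 *		return (addr + size) > (PHYS_OFFSET + SZ_64M);
 *	}
 */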

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t dma_map_single(struct device *, void *, size_t,
		enum dma_data_direction);
extern void dma_unmap_single(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);
extern dma_addr_t dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}


/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	__dma_single_cpu_to_dev(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
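
/*
 * Example (a minimal sketch): the streaming-DMA lifecycle for a buffer
 * being sent to a device.  "buf" and "len" are assumed to come from the
 * caller; the CPU must not touch the buffer between map and unmap.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... point the hardware at "handle" and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */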

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size, enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	__dma_page_cpu_to_dev(page, offset, size, dir);

	return page_to_dma(dev, page) + offset;
}
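
/*
 * Example (illustrative sketch): mapping a whole page as a receive
 * buffer; "page" is assumed to have been allocated by the caller.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_FROM_DEVICE);
 *	... hardware writes into the buffer ...
 *	dma_unmap_page(dev, handle, PAGE_SIZE, DMA_FROM_DEVICE);
 */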

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_single_dev_to_cpu(dma_to_virt(dev, handle), size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(dma_to_page(dev, handle), handle & ~PAGE_MASK,
		size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so.  At the
 * next point you give the DMA address back to the card, you
 * must first perform a dma_sync_single_range_for_device(), and
 * then the device again owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
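
/*
 * Example (a minimal sketch): inspecting a mapped buffer with the CPU
 * between transfers without unmapping it, as described above.  "handle"
 * and "len" are assumed to come from an earlier dma_map_single().
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	... the device again owns the buffer and may DMA into it ...
 */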

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
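
/*
 * Example (illustrative sketch): mapping a scatterlist for device reads.
 * "sg" and "nents" are assumed to describe a table built by the caller
 * (e.g. with sg_init_table()); note that dma_map_sg() returns the number
 * of DMA segments actually used, which may be fewer than "nents".
 *
 *	int count = dma_map_sg(dev, sg, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	... program the hardware with "count" segments ...
 *	dma_unmap_sg(dev, sg, nents, DMA_TO_DEVICE);
 */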


#endif /* __KERNEL__ */
#endif