#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <asm/scatterlist.h>
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>

#include <dma-coherence.h>
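
/* Platform-wide default DMA operations, set up by the MIPS DMA code. */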
extern struct dma_map_ops *mips_dma_map_ops;
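
/*
 * Pick the DMA ops for a device: a per-device override stashed in
 * dev->archdata.dma_ops takes precedence over the platform default.
 */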
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	else
		return mips_dma_map_ops;
}
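
/*
 * True if the range [addr, addr + size) is reachable through the
 * device's DMA mask; a device with no mask cannot do DMA at all.
 */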
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size <= *dev->dma_mask;
}
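
/* A no-op on MIPS; the generic DMA code expects this hook to exist. */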
static inline void dma_mark_clean(void *addr, size_t size) {}
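
/*
 * Pulls in the generic dma_map_single()/dma_map_page()/dma_map_sg()
 * and dma_sync_*() wrappers, all built on top of get_dma_ops().
 */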
#include <asm-generic/dma-mapping-common.h>
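
/* Ask the DMA ops whether the device can address memory under @mask. */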
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	return ops->dma_supported(dev, mask);
}
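
/*
 * Check a previously returned DMA address against the ops' error
 * sentinel; despite its name, @mask here is the dma_addr_t returned
 * by a mapping operation.
 */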
static inline int dma_mapping_error(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	return ops->mapping_error(dev, mask);
}
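
/*
 * Validate @mask against the DMA ops and install it as the device's
 * DMA mask. Typical driver usage (a sketch; "pdev" stands in for a
 * hypothetical platform/PCI device):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */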
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}
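
/*
 * Implemented out of line in the MIPS DMA code; performs the CPU cache
 * maintenance needed around device access to dma_alloc_noncoherent()
 * buffers.
 */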
extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);
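
/*
 * Allocate a coherent buffer through the DMA ops and report the
 * allocation to the DMA debugging infrastructure.
 */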
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_map_ops *ops = get_dma_ops(dev);

	ret = ops->alloc_coherent(dev, size, dma_handle, gfp);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);

	return ret;
}
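
/* Release a buffer from dma_alloc_coherent() through the DMA ops. */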
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	ops->free_coherent(dev, size, vaddr, dma_handle);

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}
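
/*
 * Cached, noncoherent allocations; callers bracket device access with
 * dma_cache_sync(). Implemented out of line in the MIPS DMA code.
 */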
void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag);

void dma_free_noncoherent(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma_handle);

#endif /* _ASM_DMA_MAPPING_H */