#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

#include <asm/scatterlist.h>
#include <asm/cache.h>
#include <asm-generic/dma-coherent.h>

#include <dma-coherence.h>

extern struct dma_map_ops *mips_dma_map_ops;

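/*
 * A device may carry its own dma_map_ops in dev->archdata (installed by
 * platform code); everything else falls back to the platform-wide
 * mips_dma_map_ops selected at boot.
 */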
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	else
		return mips_dma_map_ops;
}

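/*
 * A device with no dma_mask cannot do DMA at all; otherwise the whole
 * [addr, addr + size) range must fit under the mask.
 */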
static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	if (!dev->dma_mask)
		return false;

	return addr + size <= *dev->dma_mask;
}

static inline void dma_mark_clean(void *addr, size_t size) {}

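/*
 * asm-generic/dma-mapping-common.h builds the generic entry points
 * (dma_map_single, dma_map_page, dma_map_sg and the dma_sync_*
 * helpers) on top of get_dma_ops() above.
 */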
#include <asm-generic/dma-mapping-common.h>

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	return ops->dma_supported(dev, mask);
}

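/*
 * dma_mapping_error() is the only way to learn that a dma_map_*
 * operation failed, so drivers check every returned handle.  A sketch
 * of typical usage (hypothetical caller; buf and len are assumed):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */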
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	return ops->mapping_error(dev, dma_addr);
}

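/*
 * A driver negotiates its addressing capability before mapping any
 * memory, for example (hypothetical caller):
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */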
static inline int
dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

extern void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
			   enum dma_data_direction direction);

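/*
 * The coherent allocator dispatches to the selected dma_map_ops and
 * reports the result to the DMA-debug facility (a no-op unless
 * CONFIG_DMA_API_DEBUG is enabled).
 */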
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	void *ret;
	struct dma_map_ops *ops = get_dma_ops(dev);

	ret = ops->alloc_coherent(dev, size, dma_handle, gfp);

	debug_dma_alloc_coherent(dev, size, *dma_handle, ret);

	return ret;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
	void *vaddr, dma_addr_t dma_handle)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	ops->free_coherent(dev, size, vaddr, dma_handle);

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
}

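/*
 * Sketch of a matching allocate/free pair (hypothetical driver code;
 * RING_BYTES is an assumed constant, not part of this header):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */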
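/*
 * The non-coherent variants may return memory that the hardware does
 * not keep coherent; callers are expected to bracket CPU/device
 * accesses with dma_cache_sync() (declared above).
 */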
void *dma_alloc_noncoherent(struct device *dev, size_t size,
			    dma_addr_t *dma_handle, gfp_t flag);

void dma_free_noncoherent(struct device *dev, size_t size,
			  void *vaddr, dma_addr_t dma_handle);

#endif /* _ASM_DMA_MAPPING_H */