#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

/* Set up at boot; all SH devices share a single set of DMA operations. */
extern struct dma_map_ops *dma_ops;
extern void no_iommu_init(void);

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return dma_ops;
}

#include <asm-generic/dma-coherent.h>
#include <asm-generic/dma-mapping-common.h>

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->dma_supported)
		return ops->dma_supported(dev, mask);

	/* No ops hook: assume any mask is supported. */
	return 1;
}

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	*dev->dma_mask = mask;

	return 0;
}
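
/*
 * Example (hypothetical driver probe code; "pdev" and the 32-bit mask
 * are placeholders, a sketch only, not part of this header):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		dev_warn(&pdev->dev, "no suitable DMA mask available\n");
 */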

void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		    enum dma_data_direction dir);

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

#ifdef CONFIG_DMA_COHERENT
#define dma_is_consistent(d, h) (1)
#else
#define dma_is_consistent(d, h) (0)
#endif

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	/* No ops hook: treat a zero handle as a failed mapping. */
	return dma_addr == 0;
}
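
/*
 * Example (hypothetical driver code; "buf" and "len" are placeholders,
 * a sketch only): always check a streaming mapping before using it.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */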

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	/* Try any per-device coherent memory pool first. */
	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;
	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle, gfp);
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, dma_handle);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, dma_handle);
}
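
/*
 * Example (hypothetical driver code; names are placeholders, a sketch
 * only): coherent allocations must be paired with a matching free,
 * using the same size and the handle returned at allocation time.
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, handle);
 */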

/* arch/sh/mm/consistent.c */
extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);
extern void dma_generic_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle);

#endif /* __ASM_SH_DMA_MAPPING_H */