#ifndef __ASM_SH_DMA_MAPPING_H
#define __ASM_SH_DMA_MAPPING_H

#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/cacheflush.h>
#include <asm/io.h>

extern struct bus_type pci_bus_type;

/* arch/sh/mm/consistent.c */
extern void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *handle);
extern void consistent_free(void *vaddr, size_t size);
extern void consistent_sync(void *vaddr, size_t size, int direction);

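/*
 * SH imposes no addressing restrictions of its own, so any mask a
 * driver asks for is accepted as long as the device has a dma_mask
 * to store it in.
 */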
#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	*dev->dma_mask = mask;

	return 0;
}

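/*
 * Allocate a coherent buffer: a board-specific machine vector hook
 * (sh_mv.mv_consistent_alloc) gets first refusal, and the generic
 * allocator in arch/sh/mm/consistent.c is the fallback.
 */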
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	if (sh_mv.mv_consistent_alloc) {
		void *ret;

		ret = sh_mv.mv_consistent_alloc(dev, size, dma_handle, flag);
		if (ret != NULL)
			return ret;
	}

	return consistent_alloc(flag, size, dma_handle);
}

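/*
 * Free a buffer from dma_alloc_coherent().  If the machine vector hook
 * handled the buffer it returns 0 and we are done; otherwise fall
 * through to the generic consistent_free().
 */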
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	if (sh_mv.mv_consistent_free) {
		int ret;

		ret = sh_mv.mv_consistent_free(dev, size, vaddr, dma_handle);
		if (ret == 0)
			return;
	}

	consistent_free(vaddr, size);
}

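/*
 * There is no separate noncoherent pool; the noncoherent variants are
 * straight aliases for the coherent ones.
 */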
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

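/*
 * Write back or invalidate the cache lines covering vaddr..vaddr+size
 * according to the transfer direction, via consistent_sync().
 */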
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
				  enum dma_data_direction dir)
{
	consistent_sync(vaddr, size, (int)dir);
}

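/*
 * Map a single buffer.  On coherent PCI the cache sync can be skipped;
 * everywhere else the buffer is synced first.  Either way the bus
 * address is just virt_to_bus(), so unmapping is a no-op.
 */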
static inline dma_addr_t dma_map_single(struct device *dev,
					void *ptr, size_t size,
					enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return virt_to_bus(ptr);
#endif
	dma_cache_sync(dev, ptr, size, dir);

	return virt_to_bus(ptr);
}

#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

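/*
 * Map a scatterlist: sync each entry unless PCI is configured
 * coherent, then derive the bus address directly from the page.  As
 * with single mappings, unmap is a no-op.
 */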
static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}

	return nents;
}

#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, dma_address, size, dir);
}

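/*
 * Re-sync a streaming mapping (and, below, a sub-range of one).
 * Coherent PCI needs nothing; otherwise translate the bus address back
 * to a virtual one and flush through dma_cache_sync().
 */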
static inline void dma_sync_single(struct device *dev, dma_addr_t dma_handle,
				   size_t size, enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, bus_to_virt(dma_handle), size, dir);
}

static inline void dma_sync_single_range(struct device *dev,
					 dma_addr_t dma_handle,
					 unsigned long offset, size_t size,
					 enum dma_data_direction dir)
{
#if defined(CONFIG_PCI) && !defined(CONFIG_SH_PCIDMA_NONCOHERENT)
	if (dev->bus == &pci_bus_type)
		return;
#endif
	dma_cache_sync(dev, bus_to_virt(dma_handle) + offset, size, dir);
}

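/*
 * Scatterlist counterpart of dma_sync_single(): sync each entry where
 * needed and recompute its bus address.
 */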
static inline void dma_sync_sg(struct device *dev, struct scatterlist *sg,
			       int nelems, enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nelems; i++) {
#if !defined(CONFIG_PCI) || defined(CONFIG_SH_PCIDMA_NONCOHERENT)
		dma_cache_sync(dev, page_address(sg[i].page) + sg[i].offset,
			       sg[i].length, dir);
#endif
		sg[i].dma_address = page_to_phys(sg[i].page) + sg[i].offset;
	}
}

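/*
 * The for_cpu/for_device pairs are symmetric here: both directions
 * reduce to the same cache sync, so they simply wrap the helpers above.
 */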
static inline void dma_sync_single_for_cpu(struct device *dev,
					   dma_addr_t dma_handle, size_t size,
					   enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t dma_handle,
					      size_t size,
					      enum dma_data_direction dir)
{
	dma_sync_single(dev, dma_handle, size, dir);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
				       struct scatterlist *sg, int nelems,
				       enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline void dma_sync_sg_for_device(struct device *dev,
					  struct scatterlist *sg, int nelems,
					  enum dma_data_direction dir)
{
	dma_sync_sg(dev, sg, nelems, dir);
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
}

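/*
 * A bus address of 0 is treated as the mapping-failure sentinel on
 * this platform.
 */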
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	return dma_addr == 0;
}

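/*
 * Typical use from a driver, as a minimal illustrative sketch (the
 * device pointer "dev" and buffer "buf" below are hypothetical, not
 * part of this header):
 *
 *	dma_addr_t handle;
 *	void *buf = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	... point the device at "handle" while the CPU uses "buf" ...
 *	dma_free_coherent(dev, PAGE_SIZE, buf, handle);
 *
 * Streaming mappings instead pair dma_map_single()/dma_unmap_single()
 * around each transfer, with dma_sync_single_for_cpu()/_for_device()
 * when the CPU touches the buffer mid-stream.
 */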
#endif /* __ASM_SH_DMA_MAPPING_H */