#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}
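/*
 * Example (illustrative only, not part of this header): bus or platform
 * code can install custom DMA ops for a device before drivers bind to it.
 * "my_iommu_dma_ops" is a hypothetical ops table.
 *
 *	extern struct dma_map_ops my_iommu_dma_ops;
 *
 *	static void my_bus_setup_device(struct device *dev)
 *	{
 *		set_dma_ops(dev, &my_iommu_dma_ops);
 *	}
 */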

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
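/*
 * Example (illustrative only): a driver typically negotiates its DMA mask
 * at probe time and checks the result before doing any DMA.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "no suitable DMA available\n");
 */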

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
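/*
 * Sketch (hypothetical platform code): a machine with a fixed bus offset
 * can override these conversions from its mach/memory.h before this header
 * is included. The 0x80000000 offset below is an assumption for
 * illustration only.
 *
 *	#define __arch_pfn_to_dma(dev, pfn)	\
 *		((dma_addr_t)__pfn_to_phys(pfn) - 0x80000000)
 *	#define __arch_dma_to_pfn(dev, addr)	\
 *		__phys_to_pfn((addr) + 0x80000000)
 */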

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == DMA_ERROR_CODE;
}
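/*
 * Example (illustrative only): every streaming mapping must be checked
 * before the DMA address is handed to hardware.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */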

/*
 * Dummy noncoherent implementation. We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/*
 * dma_coherent_pre_ops - barrier function for coherent memory before DMA.
 * A barrier is required to ensure memory operations are complete before
 * the initiation of a DMA transfer.
 * If the coherent memory is Strongly Ordered:
 * - pre-ARMv7 and 8x50 guarantee ordering with respect to other memory
 *   accesses
 * - ARMv7 guarantees ordering only within a 1KB block, so we need a barrier
 * If the coherent memory is Normal, we need a barrier to prevent
 * reordering.
 */
static inline void dma_coherent_pre_ops(void)
{
#if COHERENT_IS_NORMAL == 1
	dmb();
#else
	if (arch_is_coherent())
		dmb();
	else
		barrier();
#endif
}

/*
 * dma_coherent_post_ops - barrier function for coherent memory after DMA.
 * If the coherent memory is Strongly Ordered we don't need a barrier since
 * there are no speculative fetches to Strongly Ordered memory.
 * If the coherent memory is Normal, we need a barrier to prevent
 * reordering.
 */
static inline void dma_coherent_post_ops(void)
{
#if COHERENT_IS_NORMAL == 1
	dmb();
#else
	if (arch_is_coherent())
		dmb();
	else
		barrier();
#endif
}
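/*
 * Usage sketch (illustrative only, hypothetical driver code): make a
 * descriptor in coherent memory visible before kicking the hardware.
 * KICK_REG and the descriptor layout are assumptions.
 *
 *	desc->addr = cpu_to_le32(dma);
 *	dma_coherent_pre_ops();
 *	writel(1, base + KICK_REG);
 */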

extern int dma_supported(struct device *dev, u64 mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA. This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;
	BUG_ON(!ops);

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
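/*
 * Example (illustrative only): a typical coherent allocation round trip.
 *
 *	dma_addr_t dma;
 *	void *vaddr = dma_alloc_coherent(dev, PAGE_SIZE, &dma, GFP_KERNEL);
 *	if (!vaddr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, vaddr, dma);
 */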

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
				 void *cpu_addr, dma_addr_t dma_addr,
				 size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
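/*
 * Example (illustrative only, hypothetical driver code): exporting a
 * coherent buffer through a character device's mmap file operation.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->vaddr,
 *					 foo->dma, foo->size);
 *	}
 */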

static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
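/*
 * Example (illustrative only): write-combined allocations suit buffers the
 * CPU mostly streams writes into, such as a hypothetical framebuffer.
 *
 *	fb->screen_base = dma_alloc_writecombine(dev, fb->size,
 *						 &fb->dma, GFP_KERNEL);
 */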

static inline void *dma_alloc_stronglyordered(struct device *dev, size_t size,
					      dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_stronglyordered(struct device *dev, size_t size,
					    void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mmap_stronglyordered(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_STRONGLY_ORDERED, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
					    dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_nonconsistent(struct device *dev, size_t size,
					  void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mmap_nonconsistent(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB. It must be called before the
 * memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);
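/*
 * Sketch (hypothetical machine code): enlarge the consistent region early,
 * e.g. from a machine's map_io callback, which runs before core_initcalls.
 *
 *	static void __init foo_map_io(void)
 *	{
 *		init_consistent_dma_size(SZ_8M);
 *	}
 */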

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
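/*
 * Sketch (hypothetical platform code, assuming a 64MB inbound window):
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	dmabounce_register_dev(dev, 2048, 4096, foo_needs_bounce);
 */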

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_cache_pre_ops - clean or invalidate the cache before a DMA transfer
 * is initiated, and perform a barrier operation.
 * @virtual_addr: a kernel logical or kernel virtual address
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 */
static inline void dma_cache_pre_ops(void *virtual_addr,
		size_t size, enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
			enum dma_data_direction);

	BUG_ON(!valid_dma_direction(dir));

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(virtual_addr, size, dir);
}

/**
 * dma_cache_post_ops - clean or invalidate the cache after a DMA transfer
 * has completed, and perform a barrier operation.
 * @virtual_addr: a kernel logical or kernel virtual address
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 */
static inline void dma_cache_post_ops(void *virtual_addr,
		size_t size, enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
			enum dma_data_direction);

	BUG_ON(!valid_dma_direction(dir));

	if (arch_has_speculative_dfetch() && !arch_is_coherent()
	    && dir != DMA_TO_DEVICE)
		/*
		 * Treat DMA_BIDIRECTIONAL and DMA_FROM_DEVICE
		 * identically: invalidate
		 */
		___dma_single_cpu_to_dev(virtual_addr,
					 size, DMA_FROM_DEVICE);
}
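/*
 * Usage sketch (illustrative only, hypothetical driver code; the
 * start/wait helpers are assumptions): maintain a kernel buffer around a
 * device-to-memory transfer.
 *
 *	dma_cache_pre_ops(buf, len, DMA_FROM_DEVICE);
 *	foo_start_dma_from_device(dev, buf_dma, len);
 *	foo_wait_for_dma_done(dev);
 *	dma_cache_post_ops(buf, len, DMA_FROM_DEVICE);
 */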
/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
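
/*
 * Example (illustrative only): drivers do not call the arm_dma_*
 * implementations directly; they use the generic dma_map_sg() wrappers
 * from <asm-generic/dma-mapping-common.h>, which dispatch through the
 * device's dma_map_ops.
 *
 *	int nents = dma_map_sg(dev, sgl, count, DMA_TO_DEVICE);
 *	if (nents == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, count, DMA_TO_DEVICE);
 */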

#endif /* __KERNEL__ */
#endif