#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#include <xen/xen.h>
#include <asm/xen/hypervisor.h>

#define DMA_ERROR_CODE	(~0)
extern struct dma_map_ops arm_dma_ops;
extern struct dma_map_ops arm_coherent_dma_ops;

static inline struct dma_map_ops *__generic_dma_ops(struct device *dev)
{
	if (dev && dev->archdata.dma_ops)
		return dev->archdata.dma_ops;
	return &arm_dma_ops;
}

static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (xen_initial_domain())
		return xen_dma_ops;
	else
		return __generic_dma_ops(dev);
}

static inline void set_dma_ops(struct device *dev, struct dma_map_ops *ops)
{
	BUG_ON(!dev);
	dev->archdata.dma_ops = ops;
}

#include <asm-generic/dma-mapping-common.h>

static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return get_dma_ops(dev)->set_dma_mask(dev, mask);
}
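
/*
 * Illustrative sketch only (not part of this header): a driver normally
 * constrains the device's DMA mask before creating any mappings.  The
 * device pointer "pdev" and the 32-bit mask are assumptions for the
 * example.
 *
 *	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */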

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	if (dev)
		pfn -= dev->dma_pfn_offset;
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	unsigned long pfn = __bus_to_pfn(addr);

	if (dev)
		pfn += dev->dma_pfn_offset;

	return pfn;
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	if (dev) {
		unsigned long pfn = dma_to_pfn(dev, addr);

		return phys_to_virt(__pfn_to_phys(pfn));
	}

	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	if (dev)
		return pfn_to_dma(dev, virt_to_pfn(addr));

	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}

#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/* The ARM override for dma_max_pfn() */
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return PHYS_PFN_OFFSET + dma_to_pfn(dev, *dev->dma_mask);
}
#define dma_max_pfn(dev) dma_max_pfn(dev)

static inline int set_arch_dma_coherent_ops(struct device *dev)
{
	set_dma_ops(dev, &arm_coherent_dma_ops);
	return 0;
}
#define set_arch_dma_coherent_ops(dev) set_arch_dma_coherent_ops(dev)

static inline dma_addr_t phys_to_dma(struct device *dev, phys_addr_t paddr)
{
	unsigned int offset = paddr & ~PAGE_MASK;
	return pfn_to_dma(dev, __phys_to_pfn(paddr)) + offset;
}

static inline phys_addr_t dma_to_phys(struct device *dev, dma_addr_t dev_addr)
{
	unsigned int offset = dev_addr & ~PAGE_MASK;
	return __pfn_to_phys(dma_to_pfn(dev, dev_addr)) + offset;
}

static inline bool dma_capable(struct device *dev, dma_addr_t addr, size_t size)
{
	u64 limit, mask;

	if (!dev->dma_mask)
		return 0;

	mask = *dev->dma_mask;

	limit = (mask + 1) & ~mask;
	if (limit && size > limit)
		return 0;

	if ((addr | (addr + size - 1)) & ~mask)
		return 0;

	return 1;
}

static inline void dma_mark_clean(void *addr, size_t size) { }

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);
	return dma_addr == DMA_ERROR_CODE;
}
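
/*
 * Illustrative sketch only: every streaming mapping should be checked with
 * dma_mapping_error() before the address is handed to hardware.  The buffer
 * "buf" and length "len" are assumptions for the example; dma_map_single()
 * is provided by the generic dma-mapping-common.h helpers included above.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */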

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

extern int dma_supported(struct device *dev, u64 mask);

extern int arm_dma_set_mask(struct device *dev, u64 dma_mask);

/**
 * arm_dma_alloc - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 * @attrs: optional attributes that specify mapping properties
 *
 * Allocate some memory for a device for performing DMA.  This function
 * allocates pages, and will return the CPU-viewed address, and sets @handle
 * to be the device-viewed address.
 */
extern void *arm_dma_alloc(struct device *dev, size_t size, dma_addr_t *handle,
			   gfp_t gfp, struct dma_attrs *attrs);

#define dma_alloc_coherent(d, s, h, f) dma_alloc_attrs(d, s, h, f, NULL)

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;
	BUG_ON(!ops);

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

/**
 * arm_dma_free - free memory allocated by arm_dma_alloc
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * arm_dma_alloc().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call executing are illegal.
 */
extern void arm_dma_free(struct device *dev, size_t size, void *cpu_addr,
			 dma_addr_t handle, struct dma_attrs *attrs);

#define dma_free_coherent(d, s, c, h) dma_free_attrs(d, s, c, h, NULL)

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
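
/*
 * Illustrative sketch only: a driver pairs dma_alloc_coherent() with
 * dma_free_coherent() once the hardware has finished with the buffer.
 * The 4KiB descriptor-ring size is an assumption for the example.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */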

/**
 * arm_dma_mmap - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 * @attrs: optional attributes that specify mapping properties
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
extern int arm_dma_mmap(struct device *dev, struct vm_area_struct *vma,
			void *cpu_addr, dma_addr_t dma_addr, size_t size,
			struct dma_attrs *attrs);

static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t flag)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, flag, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
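
/*
 * Illustrative sketch only: write-combined allocations suit buffers the CPU
 * mostly streams into, such as a frame buffer.  The 1MiB size below is an
 * assumption for the example.
 *
 *	void *fb;
 *	dma_addr_t fb_dma;
 *
 *	fb = dma_alloc_writecombine(dev, SZ_1M, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	dma_free_writecombine(dev, SZ_1M, fb, fb_dma);
 */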

/*
 * This can be called during early boot to increase the size of the atomic
 * coherent DMA pool above the default value of 256KiB. It must be called
 * before postcore_initcall.
 */
extern void __init init_dma_coherent_pool_size(unsigned long size);

/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM)
 * On some ADI engineering systems, PCI inbound window is 32MB (12MB total RAM)
 *
 * The following are helper functions used by the dmabounce subsystem
 *
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing. The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
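
/*
 * Illustrative sketch only: platform code supplies a needs_bounce callback
 * when registering.  The function name "foo_needs_bounce" and the 64MB
 * window check are assumptions for the example, loosely modelled on the
 * IXP425 limit noted above.
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, SZ_2K, SZ_64K, foo_needs_bounce);
 */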

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 *
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The scatter list versions of the above methods.
 */
extern int arm_dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction, struct dma_attrs *attrs);
extern void arm_dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void arm_dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern int arm_dma_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		struct dma_attrs *attrs);
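
/*
 * Illustrative sketch only: drivers do not call the arm_dma_* entry points
 * directly; they reach them through the generic dma_map_sg()/dma_unmap_sg()
 * wrappers.  The scatterlist "sgl" with "nents" entries is an assumption
 * for the example.
 *
 *	int mapped = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (!mapped)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */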

#endif /* __KERNEL__ */
#endif