#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

#ifdef __arch_page_to_dma
#error Please update to __arch_pfn_to_dma
#endif

/*
 * dma_to_pfn/pfn_to_dma/dma_to_virt/virt_to_dma are architecture private
 * functions used internally by the DMA-mapping API to provide DMA
 * addresses. They must not be used by drivers.
 */
#ifndef __arch_pfn_to_dma
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return (dma_addr_t)__pfn_to_bus(pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __bus_to_pfn(addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt((unsigned long)addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t pfn_to_dma(struct device *dev, unsigned long pfn)
{
	return __arch_pfn_to_dma(dev, pfn);
}

static inline unsigned long dma_to_pfn(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_pfn(dev, addr);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif
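
/*
 * Illustrative sketch only (not part of this header): a platform with a
 * fixed RAM-to-bus offset could supply its own translations by defining
 * all four __arch_* hooks in its mach/memory.h.  The 0x80000000 offset
 * below is a made-up example value.
 *
 *	#define __arch_pfn_to_dma(dev, pfn)	\
 *		((dma_addr_t)__pfn_to_phys(pfn) - 0x80000000)
 *	#define __arch_dma_to_pfn(dev, addr)	\
 *		__phys_to_pfn((addr) + 0x80000000)
 *	#define __arch_dma_to_virt(dev, addr)	\
 *		phys_to_virt((addr) + 0x80000000)
 *	#define __arch_virt_to_dma(dev, addr)	\
 *		((dma_addr_t)(virt_to_phys(addr) - 0x80000000))
 */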

/*
 * The DMA API is built upon the notion of "buffer ownership".  A buffer
 * is either exclusively owned by the CPU (and therefore may be accessed
 * by it) or exclusively owned by the DMA device.  These helper functions
 * represent the transitions between these two ownership states.
 *
 * Note, however, that on later ARMs, this notion does not work due to
 * speculative prefetches.  We model our approach on the assumption that
 * the CPU does perform speculative prefetches, which means we clean
 * caches before transfers and delay cache invalidation until transfer
 * completion.
 *
 * Private support functions: these are not part of the API and are
 * liable to change.  Drivers must not use these.
 */
static inline void __dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_cpu_to_dev(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_cpu_to_dev(kaddr, size, dir);
}

static inline void __dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	extern void ___dma_single_dev_to_cpu(const void *, size_t,
		enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_single_dev_to_cpu(kaddr, size, dir);
}

static inline void __dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_cpu_to_dev(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_cpu_to_dev(page, off, size, dir);
}

static inline void __dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	extern void ___dma_page_dev_to_cpu(struct page *, unsigned long,
		size_t, enum dma_data_direction);

	if (!arch_is_coherent())
		___dma_page_dev_to_cpu(page, off, size, dir);
}

extern int dma_supported(struct device *, u64);
extern int dma_set_mask(struct device *, u64);
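
/*
 * Illustrative sketch only: a hypothetical probe routine restricting its
 * device to 32-bit DMA addresses before creating any mappings.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */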

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
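
/*
 * Illustrative sketch only: every streaming mapping should be checked
 * with dma_mapping_error() before the address is handed to the device;
 * "buf" and "len" are hypothetical driver variables.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */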

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
		dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_coherent(struct device *, size_t, dma_addr_t *, gfp_t);
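
/*
 * Illustrative sketch only: allocating a small descriptor ring at probe
 * time and releasing it on teardown; "ring" and "ring_dma" are
 * hypothetical driver variables.
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(dev, SZ_4K, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, ring, ring_dma);
 */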

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to memory and mappings associated with cpu_addr/handle
 * during and after this call are illegal.
 */
extern void dma_free_coherent(struct device *, size_t, void *, dma_addr_t);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
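
/*
 * Illustrative sketch only: a hypothetical driver mmap file operation
 * exposing a coherent buffer allocated earlier; the "foo" structure and
 * its fields are invented for this example.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->buf,
 *					 foo->buf_dma, foo->buf_size);
 *	}
 */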


/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *dma_alloc_writecombine(struct device *, size_t, dma_addr_t *,
		gfp_t);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *, struct vm_area_struct *,
		void *, dma_addr_t, size_t);
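
/*
 * Illustrative sketch only: write-combined memory suits write-mostly
 * buffers such as framebuffers; the "fb" names are invented.
 *
 *	fb->screen_base = dma_alloc_writecombine(dev, fb_size,
 *						 &fb->screen_dma, GFP_KERNEL);
 *	...
 *	dma_free_writecombine(dev, fb_size, fb->screen_base, fb->screen_dma);
 */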

/*
 * This can be called during boot to increase the size of the consistent
 * DMA region above its default value of 2MB.  It must be called before
 * the memory allocator is initialised, i.e. before any core_initcall.
 */
extern void __init init_consistent_dma_size(unsigned long size);
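
/*
 * Illustrative sketch only: a hypothetical machine growing the
 * consistent region to 8MB from its early init code.
 *
 *	static void __init foo_init_early(void)
 *	{
 *		init_consistent_dma_size(SZ_8M);
 *	}
 */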


#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB
 * (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 * @needs_bounce_fn: called to determine whether buffer needs bouncing
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long,
		unsigned long, int (*)(struct device *, dma_addr_t, size_t));
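
/*
 * Illustrative sketch only: registering a hypothetical device whose DMA
 * window covers only the first 64MB of bus addresses.
 *
 *	static int foo_needs_bounce(struct device *dev, dma_addr_t addr,
 *				    size_t size)
 *	{
 *		return (addr + size) > SZ_64M;
 *	}
 *
 *	ret = dmabounce_register_dev(dev, SZ_2K, SZ_64K, foo_needs_bounce);
 */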

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a
 * device that was previously registered with dmabounce_register_dev
 * is removed from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/*
 * The DMA API, implemented by dmabounce.c.  See below for descriptions.
 */
extern dma_addr_t __dma_map_page(struct device *, struct page *,
		unsigned long, size_t, enum dma_data_direction);
extern void __dma_unmap_page(struct device *, dma_addr_t, size_t,
		enum dma_data_direction);

/*
 * Private functions
 */
int dmabounce_sync_for_cpu(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
int dmabounce_sync_for_device(struct device *, dma_addr_t, unsigned long,
		size_t, enum dma_data_direction);
#else
static inline int dmabounce_sync_for_cpu(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline int dmabounce_sync_for_device(struct device *d, dma_addr_t addr,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	return 1;
}

static inline dma_addr_t __dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction dir)
{
	__dma_page_cpu_to_dev(page, offset, size, dir);
	return pfn_to_dma(dev, page_to_pfn(page)) + offset;
}

static inline void __dma_unmap_page(struct device *dev, dma_addr_t handle,
	size_t size, enum dma_data_direction dir)
{
	__dma_page_dev_to_cpu(pfn_to_page(dma_to_pfn(dev, handle)),
		handle & ~PAGE_MASK, size, dir);
}
#endif /* CONFIG_DMABOUNCE */

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction dir)
{
	unsigned long offset;
	struct page *page;
	dma_addr_t addr;

	BUG_ON(!virt_addr_valid(cpu_addr));
	BUG_ON(!virt_addr_valid(cpu_addr + size - 1));
	BUG_ON(!valid_dma_direction(dir));

	page = virt_to_page(cpu_addr);
	offset = (unsigned long)cpu_addr & ~PAGE_MASK;
	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, true);

	return addr;
}
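
/*
 * Illustrative sketch only: the full ownership cycle for a hypothetical
 * transmit buffer; the device owns the memory between map and unmap.
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... start the transfer and wait for completion ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */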

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page().
 */
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir)
{
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	addr = __dma_map_page(dev, page, offset, size, dir);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}
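
/*
 * Illustrative sketch only: mapping part of a page, e.g. one fragment of
 * a scattered buffer; the "frag" structure is invented.
 *
 *	dma = dma_map_page(dev, frag->page, frag->offset, frag->size,
 *			   DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */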

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_single)
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_single(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, true);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer (same as passed to dma_map_page)
 * @dir: DMA transfer direction (same as passed to dma_map_page)
 *
 * Unmap a page streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void dma_unmap_page(struct device *dev, dma_addr_t handle,
		size_t size, enum dma_data_direction dir)
{
	debug_dma_unmap_page(dev, handle, size, dir, false);
	__dma_unmap_page(dev, handle, size, dir);
}

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA mapping,
 * you must call this function before doing so.  Before the next point
 * at which you give the DMA address back to the device, you must first
 * perform a dma_sync_single_for_device(), and then the device again
 * owns the buffer.
 */
static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_cpu(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_cpu(dev, handle, offset, size, dir))
		return;

	__dma_single_dev_to_cpu(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t handle, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	BUG_ON(!valid_dma_direction(dir));

	debug_dma_sync_single_for_device(dev, handle + offset, size, dir);

	if (!dmabounce_sync_for_device(dev, handle, offset, size, dir))
		return;

	__dma_single_cpu_to_dev(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
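
/*
 * Illustrative sketch only: inspecting a mapped receive buffer with the
 * CPU and then handing it back to the device for further DMA.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */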

/*
 * The scatter list versions of the above methods.
 */
extern int dma_map_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_unmap_sg(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int,
		enum dma_data_direction);
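
/*
 * Illustrative sketch only: mapping a scatterlist and programming each
 * resulting DMA segment; "sglist", "nents" and program_hw() are invented.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw(dev, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */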


#endif /* __KERNEL__ */
#endif /* ASMARM_DMA_MAPPING_H */