#ifndef ASMARM_DMA_MAPPING_H
#define ASMARM_DMA_MAPPING_H

#ifdef __KERNEL__

#include <linux/mm_types.h>
#include <linux/scatterlist.h>

#include <asm-generic/dma-coherent.h>
#include <asm/memory.h>

/*
 * page_to_dma/dma_to_virt/virt_to_dma are architecture private functions
 * used internally by the DMA-mapping API to provide DMA addresses. They
 * must not be used by drivers.
 */
#ifndef __arch_page_to_dma
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)page_address(page));
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return (void *)__bus_to_virt(addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return (dma_addr_t)__virt_to_bus((unsigned long)(addr));
}
#else
static inline dma_addr_t page_to_dma(struct device *dev, struct page *page)
{
	return __arch_page_to_dma(dev, page);
}

static inline void *dma_to_virt(struct device *dev, dma_addr_t addr)
{
	return __arch_dma_to_virt(dev, addr);
}

static inline dma_addr_t virt_to_dma(struct device *dev, void *addr)
{
	return __arch_virt_to_dma(dev, addr);
}
#endif

/*
 * DMA-consistent mapping functions.  These allocate/free a region of
 * uncached, non-write-buffered mapped memory space for use with DMA
 * devices.  This is the "generic" version; the PCI specific version
 * is in pci.h.
 *
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*).
 */
extern void dma_cache_maint(const void *kaddr, size_t size, int rw);

/*
 * Return whether the given device DMA address mask can be supported
 * properly.  For example, if your device can only drive the low 24-bits
 * during bus mastering, then you would pass 0x00ffffff as the mask
 * to this function.
 *
 * FIXME: This should really be a platform specific issue - we should
 * return false if GFP_DMA allocations may not satisfy the supplied 'mask'.
 */
static inline int dma_supported(struct device *dev, u64 mask)
{
	return dev->dma_mask && *dev->dma_mask != 0;
}

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}
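
/*
 * Illustrative sketch (an addition, not from the original header): a driver
 * whose device can only drive the low 24 address bits might restrict its
 * mask from probe code roughly like this; the surrounding driver context
 * is hypothetical.
 *
 *	if (dma_set_mask(dev, 0x00ffffff))
 *		return -EIO;
 */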

static inline int dma_get_cache_alignment(void)
{
	return 32;
}

static inline int dma_is_consistent(struct device *dev, dma_addr_t handle)
{
	return !!arch_is_coherent();
}

/*
 * DMA errors are defined by all-bits-set in the DMA address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return dma_addr == ~0;
}
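
/*
 * Illustrative sketch (an addition, not from the original header): checking
 * a streaming mapping for the all-bits-set error value; "buf" and "len"
 * are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 */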

/*
 * Dummy noncoherent implementation.  We don't provide a dma_cache_sync
 * function so drivers using this API are highlighted with build warnings.
 */
static inline void *
dma_alloc_noncoherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return NULL;
}

static inline void
dma_free_noncoherent(struct device *dev, size_t size, void *cpu_addr,
		     dma_addr_t handle)
{
}

/**
 * dma_alloc_coherent - allocate consistent memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, unbuffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);
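
/*
 * Illustrative sketch (an addition, not from the original header): a typical
 * allocate/use/free cycle for a coherent buffer.
 *
 *	dma_addr_t handle;
 *	void *cpu = dma_alloc_coherent(dev, PAGE_SIZE, &handle, GFP_KERNEL);
 *	if (!cpu)
 *		return -ENOMEM;
 *	... program the device with "handle", access the buffer via "cpu" ...
 *	dma_free_coherent(dev, PAGE_SIZE, cpu, handle);
 */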

/**
 * dma_free_coherent - free memory allocated by dma_alloc_coherent
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: size of memory originally requested in dma_alloc_coherent
 * @cpu_addr: CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 *
 * Free (and unmap) a DMA buffer previously allocated by
 * dma_alloc_coherent().
 *
 * References to the memory and mappings associated with cpu_addr/handle
 * are illegal once this call has begun executing.
 */
extern void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t handle);

/**
 * dma_mmap_coherent - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_coherent
 * @handle: device-view address returned from dma_alloc_coherent
 * @size: size of memory originally requested in dma_alloc_coherent
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_coherent
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t handle, size_t size);
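
/*
 * Illustrative sketch (an addition, not from the original header): exposing
 * a coherent buffer through a driver's mmap file operation; "mydrv" and
 * its fields are hypothetical.
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(mydrv.dev, vma, mydrv.cpu_addr,
 *					 mydrv.handle, mydrv.size);
 *	}
 */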

/**
 * dma_alloc_writecombine - allocate writecombining memory for DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @size: required memory size
 * @handle: bus-specific DMA address
 *
 * Allocate some uncached, buffered memory for a device for
 * performing DMA.  This function allocates pages, and will
 * return the CPU-viewed address, and sets @handle to be the
 * device-viewed address.
 */
extern void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp);

#define dma_free_writecombine(dev,size,cpu_addr,handle) \
	dma_free_coherent(dev,size,cpu_addr,handle)

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t handle, size_t size);
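
/*
 * Illustrative sketch (an addition, not from the original header):
 * writecombine allocations suit frame buffers and similar streamed output
 * buffers; "fb_virt", "fb_dma" and "fb_size" are hypothetical.
 *
 *	fb_virt = dma_alloc_writecombine(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb_virt)
 *		return -ENOMEM;
 */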

/**
 * dma_map_single - map a single buffer for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @cpu_addr: CPU direct mapped address of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_single() or
 * dma_sync_single_for_cpu().
 */
#ifndef CONFIG_DMABOUNCE
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(cpu_addr, size, dir);

	return virt_to_dma(dev, cpu_addr);
}
#else
extern dma_addr_t dma_map_single(struct device *, void *, size_t, enum dma_data_direction);
#endif
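
/*
 * Illustrative sketch (an addition, not from the original header): a full
 * streaming-DMA cycle for a single buffer; "buf", "len" and the transfer
 * itself are hypothetical.
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	... hand "dma" to the device and wait for the transfer ...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */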

/**
 * dma_map_page - map a portion of a page for streaming DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @page: page that buffer resides in
 * @offset: offset into page for start of buffer
 * @size: size of buffer to map
 * @dir: DMA transfer direction
 *
 * Ensure that any data held in the cache is appropriately discarded
 * or written back.
 *
 * The device owns this memory once this call has completed.  The CPU
 * can regain ownership by calling dma_unmap_page() or
 * dma_sync_single_for_cpu().
 */
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction dir)
{
	return dma_map_single(dev, page_address(page) + offset, size, dir);
}
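
/*
 * Illustrative sketch (an addition, not from the original header): mapping
 * part of a page, e.g. one handed over by the page cache; "page", "offset"
 * and "len" are hypothetical.
 *
 *	dma_addr_t dma = dma_map_page(dev, page, offset, len, DMA_FROM_DEVICE);
 *	... run the transfer ...
 *	dma_unmap_page(dev, dma, len, DMA_FROM_DEVICE);
 */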

/**
 * dma_unmap_single - unmap a single buffer previously mapped
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_single() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_single(struct device *dev, dma_addr_t handle, size_t size,
		 enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_single(struct device *, dma_addr_t, size_t, enum dma_data_direction);
#endif

/**
 * dma_unmap_page - unmap a buffer previously mapped through dma_map_page()
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @size: size of buffer to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a single streaming mode DMA translation.  The handle and size
 * must match what was provided in the previous dma_map_page() call.
 * All other usages are undefined.
 *
 * After this call, reads by the CPU to the buffer are guaranteed to see
 * whatever the device wrote there.
 */
static inline void
dma_unmap_page(struct device *dev, dma_addr_t handle, size_t size,
	       enum dma_data_direction dir)
{
	dma_unmap_single(dev, handle, size, dir);
}

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming
 * mode for DMA.  This is the scatter-gather version of the
 * above dma_map_single interface.  Here the scatter-gather list
 * elements are each tagged with the appropriate dma address
 * and length.  They are obtained via sg_dma_{address,length}(SG).
 *
 * NOTE: An implementation may be able to use a smaller number of
 * DMA address/length pairs than there are SG table elements
 * (for example via virtual mapping capabilities).
 * The routine returns the number of addr/length pairs actually
 * used, at most nents.
 *
 * Device ownership issues as mentioned above for dma_map_single are
 * the same here.
 */
#ifndef CONFIG_DMABOUNCE
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt;

		sg->dma_address = page_to_dma(dev, sg_page(sg)) + sg->offset;
		virt = sg_virt(sg);

		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}

	return nents;
}
#else
extern int dma_map_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif
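
/*
 * Illustrative sketch (an addition, not from the original header): mapping
 * a scatterlist and programming one hardware descriptor per resulting DMA
 * segment; "sglist", "nents" and program_hw_desc() are hypothetical.  Note
 * that dma_unmap_sg() takes the original nents, not the value returned by
 * dma_map_sg().
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	for_each_sg(sglist, s, count, i)
 *		program_hw_desc(sg_dma_address(s), sg_dma_len(s));
 *	... run the transfer ...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */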

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap
 * @dir: DMA transfer direction
 *
 * Unmap a set of streaming mode DMA translations.
 * Again, CPU read rules concerning calls here are the same as for
 * dma_unmap_single() above.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	/* nothing to do */
}
#else
extern void dma_unmap_sg(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

/**
 * dma_sync_single_range_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @handle: DMA address of buffer
 * @offset: offset of region to start sync
 * @size: size of region to sync
 * @dir: DMA transfer direction (same as passed to dma_map_single)
 *
 * Make physical memory consistent for a single streaming mode DMA
 * translation after a transfer.
 *
 * If you perform a dma_map_single() but wish to interrogate the
 * buffer using the CPU, yet do not wish to tear down the DMA
 * mapping, you must call this function before doing so.  Before you
 * next give the DMA address back to the device, you must first
 * perform a dma_sync_single_range_for_device(), and then the
 * device again owns the buffer.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	if (!arch_is_coherent())
		dma_cache_maint(dma_to_virt(dev, handle) + offset, size, dir);
}
#else
extern void dma_sync_single_range_for_cpu(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
extern void dma_sync_single_range_for_device(struct device *, dma_addr_t, unsigned long, size_t, enum dma_data_direction);
#endif

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t handle, size_t size,
			enum dma_data_direction dir)
{
	dma_sync_single_range_for_cpu(dev, handle, 0, size, dir);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t handle, size_t size,
			   enum dma_data_direction dir)
{
	dma_sync_single_range_for_device(dev, handle, 0, size, dir);
}
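
/*
 * Illustrative sketch (an addition, not from the original header): the CPU
 * briefly inspecting a mapped buffer between device transfers; "dma" and
 * "len" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *	... the device owns the buffer again ...
 */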

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to sync
 * @dir: DMA transfer direction
 *
 * Make physical memory consistent for a set of streaming
 * mode DMA translations after a transfer.
 *
 * The same as dma_sync_single_for_* but for a scatter-gather list,
 * same rules and usage.
 */
#ifndef CONFIG_DMABOUNCE
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nents,
		    enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = sg_virt(sg);
		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nents,
		       enum dma_data_direction dir)
{
	int i;

	for (i = 0; i < nents; i++, sg++) {
		char *virt = sg_virt(sg);
		if (!arch_is_coherent())
			dma_cache_maint(virt, sg->length, dir);
	}
}
#else
extern void dma_sync_sg_for_cpu(struct device *, struct scatterlist *, int, enum dma_data_direction);
extern void dma_sync_sg_for_device(struct device *, struct scatterlist *, int, enum dma_data_direction);
#endif

#ifdef CONFIG_DMABOUNCE
/*
 * For SA-1111, IXP425, and ADI systems the dma-mapping functions are "magic"
 * and utilize bounce buffers as needed to work around limited DMA windows.
 *
 * On the SA-1111, a bug limits DMA to only certain regions of RAM.
 * On the IXP425, the PCI inbound window is 64MB (256MB total RAM).
 * On some ADI engineering systems, the PCI inbound window is 32MB (12MB total RAM).
 *
 * The following are helper functions used by the dmabounce subsystem.
 */

/**
 * dmabounce_register_dev
 *
 * @dev: valid struct device pointer
 * @small_buf_size: size of buffers to use with small buffer pool
 * @large_buf_size: size of buffers to use with large buffer pool (can be 0)
 *
 * This function should be called by low-level platform code to register
 * a device as requiring DMA buffer bouncing.  The function will allocate
 * appropriate DMA pools for the device.
 */
extern int dmabounce_register_dev(struct device *, unsigned long, unsigned long);
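
/*
 * Illustrative sketch (an addition, not from the original header): platform
 * code registering a device with the dmabounce subsystem; the pool sizes
 * here are purely illustrative.
 *
 *	if (dmabounce_register_dev(dev, 2048, 65536))
 *		dev_err(dev, "failed to register with dmabounce\n");
 */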

/**
 * dmabounce_unregister_dev
 *
 * @dev: valid struct device pointer
 *
 * This function should be called by low-level platform code when a device
 * that was previously registered with dmabounce_register_dev is removed
 * from the system.
 */
extern void dmabounce_unregister_dev(struct device *);

/**
 * dma_needs_bounce
 *
 * @dev: valid struct device pointer
 * @dma_handle: dma_handle of unbounced buffer
 * @size: size of region being mapped
 *
 * Platforms that utilize the dmabounce mechanism must implement
 * this function.
 *
 * The dmabounce routines call this function whenever a dma-mapping
 * is requested to determine whether a given buffer needs to be bounced
 * or not.  The function must return 0 if the buffer is OK for
 * DMA access and 1 if the buffer needs to be bounced.
 */
extern int dma_needs_bounce(struct device *, dma_addr_t, size_t);
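
/*
 * Illustrative sketch (an addition, not from the original header): a
 * platform implementation that bounces anything above a hypothetical
 * inbound window limit, MY_PLAT_DMA_LIMIT.
 *
 *	int dma_needs_bounce(struct device *dev, dma_addr_t dma, size_t size)
 *	{
 *		return (dma + size) > MY_PLAT_DMA_LIMIT;
 *	}
 */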
#endif /* CONFIG_DMABOUNCE */

#endif /* __KERNEL__ */
#endif