#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

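/*
 * Per-backend DMA operations. Each IOMMU implementation (e.g. nommu,
 * swiotlb, GART, AMD IOMMU, VT-d) provides one of these tables. Hooks
 * that may legitimately be NULL are checked by the inline wrappers
 * below; map_single and map_sg are mandatory.
 */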
struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
					 void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				      size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
					size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
						     dma_addr_t dma_handle,
						     unsigned long offset,
						     size_t size,
						     int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
							dma_addr_t dma_handle,
							unsigned long offset,
							size_t size,
							int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
					   struct scatterlist *sg, int nelems,
					   int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
					      struct scatterlist *sg,
					      int nelems, int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				  int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				    struct scatterlist *sg, int nents,
				    int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};

extern struct dma_mapping_ops *dma_ops;

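/*
 * Look up the DMA operations for a device. 32-bit kernels use a single
 * global table; 64-bit kernels allow a per-device override in
 * dev->archdata, falling back to the global table for a NULL device or
 * a device without its own ops.
 */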
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/*
 * Keep the historical behaviour: backends without a mapping_error hook
 * signal failure by returning bad_dma_address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

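/*
 * x86 DMA is cache-coherent, so the "noncoherent" allocators map
 * straight onto the coherent ones and every mapping is consistent.
 */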
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

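/*
 * Streaming mappings: establish (and later tear down) a bus address for
 * a buffer the CPU has already initialized. map_single is mandatory for
 * every backend, hence no NULL check before the call.
 */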
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}

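/*
 * Typical streaming-DMA usage (a sketch; "dev", "buf" and "len" are
 * illustrative, see Documentation/DMA-API.txt for the full rules):
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -EIO;
 *	... hardware performs the transfer ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */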
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}

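/*
 * The dma_sync_* helpers hand ownership of a streaming mapping back and
 * forth between the CPU (*_for_cpu) and the device (*_for_device)
 * without unmapping it, e.g. so a bounce buffer can be copied at the
 * right moment.
 */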
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

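/*
 * Page mappings reuse map_single on the page's physical address;
 * "offset" is the byte offset within the page.
 */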
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

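/*
 * Nothing to invalidate on cache-coherent x86; draining the CPU write
 * buffers is all dma_cache_sync() needs to do here.
 */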
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * There is no easy way to get the cache size on all x86 CPUs,
	 * so return the maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

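/*
 * Helpers for dma_alloc_coherent(): derive the effective coherent mask
 * (defaulting to 24 or 32 bits when the device sets none) and translate
 * it into the matching GFP_DMA/GFP_DMA32 allocation flags.
 */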
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

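/*
 * Allocation order: try the device's private coherent pool first, then
 * fall back to the backend's alloc_coherent with sanitized gfp flags.
 * Device-less callers are routed through x86_dma_fallback_dev.
 */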
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

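/*
 * Frees must mirror dma_alloc_coherent(): buffers from the per-device
 * pool are released there; everything else goes back to the backend.
 */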
static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_X86_DMA_MAPPING_H */