#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
extern int forbid_dac;
extern int force_iommu;

struct dma_mapping_ops {
	int (*mapping_error)(dma_addr_t dma_addr);
	void *(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void (*free_coherent)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle);
	dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
				 size_t size, int direction);
	/* like map_single, but doesn't check the device mask */
	dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
				 size_t size, int direction);
	void (*unmap_single)(struct device *dev, dma_addr_t addr,
			     size_t size, int direction);
	void (*sync_single_for_cpu)(struct device *hwdev,
				    dma_addr_t dma_handle, size_t size,
				    int direction);
	void (*sync_single_for_device)(struct device *hwdev,
				       dma_addr_t dma_handle, size_t size,
				       int direction);
	void (*sync_single_range_for_cpu)(struct device *hwdev,
					  dma_addr_t dma_handle,
					  unsigned long offset,
					  size_t size, int direction);
	void (*sync_single_range_for_device)(struct device *hwdev,
					     dma_addr_t dma_handle,
					     unsigned long offset,
					     size_t size, int direction);
	void (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void (*sync_sg_for_device)(struct device *hwdev,
				   struct scatterlist *sg, int nelems,
				   int direction);
	int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
		      int nents, int direction);
	void (*unmap_sg)(struct device *hwdev,
			 struct scatterlist *sg, int nents,
			 int direction);
	int (*dma_supported)(struct device *hwdev, u64 mask);
	int is_phys;
};

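/*
 * A minimal sketch of how a backend might populate this table; the
 * names are illustrative, not an actual in-tree implementation.
 * Optional hooks left NULL are checked by the inline wrappers below:
 *
 *	static const struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= example_map_single,
 *		.map_sg		= example_map_sg,
 *		.dma_supported	= example_dma_supported,
 *		.is_phys	= 1,
 *	};
 */
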
extern const struct dma_mapping_ops *dma_ops;

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
	if (dma_ops->mapping_error)
		return dma_ops->mapping_error(dma_addr);

	return (dma_addr == bad_dma_address);
}
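
/*
 * Drivers should check every handle returned by the mapping functions
 * before using it. A sketch (buf and len are hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 */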

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
		       void *vaddr, dma_addr_t dma_handle);

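/*
 * Sketch of a typical coherent allocation for a descriptor ring; the
 * names and PAGE_SIZE length are illustrative:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */
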
extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

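/*
 * Sketch of probe-time mask negotiation; DMA_32BIT_MASK comes from
 * <linux/dma-mapping.h>:
 *
 *	if (dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */
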
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_single)
		dma_ops->unmap_single(dev, addr, size, direction);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->unmap_sg)
		dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

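/*
 * dma_map_sg() may merge entries, so the returned count (not nents)
 * bounds the walk over DMA addresses; unmap still takes the original
 * nents. A sketch (program_hw is hypothetical):
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	struct scatterlist *sg;
 *
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 */
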
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_cpu)
		dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
					     direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_for_device)
		dma_ops->sync_single_for_device(hwdev, dma_handle, size,
						direction);
	flush_write_buffers();
}

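/*
 * Sketch of the sync discipline for a long-lived streaming mapping the
 * device writes and the CPU then inspects (examine is hypothetical):
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	examine(buf);
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
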
static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_cpu)
		dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
						   size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_single_range_for_device)
		dma_ops->sync_single_range_for_device(hwdev, dma_handle,
						      offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_cpu)
		dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	if (dma_ops->sync_sg_for_device)
		dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	BUG_ON(!valid_dma_direction(direction));
	return dma_ops->map_single(dev, page_to_phys(page) + offset,
				   size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

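/*
 * Sketch of mapping a sub-page fragment (e.g. part of an skb) for
 * device reads; page, offset and len are hypothetical:
 *
 *	dma_addr_t d = dma_map_page(dev, page, offset, len, DMA_TO_DEVICE);
 *	...
 *	dma_unmap_page(dev, d, len, DMA_TO_DEVICE);
 */
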
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	/* x86 DMA is cache-coherent; flushing the write buffers suffices. */
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get cache size on all x86, so return the
	 * maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) (1)

#ifdef CONFIG_X86_32
# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
struct dma_coherent_mem {
	void *virt_base;
	u32 device_base;
	int size;
	int flags;
	unsigned long *bitmap;
};

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
			    dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size);
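
/*
 * Sketch of declaring a dedicated on-device memory window at probe
 * time; the addresses and size are illustrative. The call returns
 * nonzero on success in this era:
 *
 *	if (!dma_declare_coherent_memory(dev, 0xf8000000, 0xf8000000,
 *					 0x10000, DMA_MEMORY_MAP))
 *		return -ENXIO;
 */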
#endif /* CONFIG_X86_32 */
#endif /* _ASM_DMA_MAPPING_H_ */