#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm-generic/dma-coherent.h>

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
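
/*
 * Illustrative sketch only (not part of this header): a typical
 * streaming scatter-gather mapping.  "sgt" is assumed to be an
 * sg_table already populated by the caller, e.g. via sg_alloc_table().
 *
 *	int nents;
 *
 *	nents = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *	if (nents == 0)
 *		return -ENOMEM;	/- 0 is the only error return -/
 *	/- ... point the device at the nents mapped segments ... -/
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the value
 * returned by dma_map_sg().
 */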

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
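
/*
 * Illustrative sketch only: mapping one page for device reads and
 * checking the result with dma_mapping_error() (defined later in this
 * header).  "page" is a hypothetical struct page the caller owns.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_page(dev, page, 0, PAGE_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	/- ... hand "dma" to the device, wait for completion ... -/
 *	dma_unmap_page(dev, dma, PAGE_SIZE, DMA_TO_DEVICE);
 */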

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}
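
/*
 * Illustrative sketch only: the usual ownership dance for a long-lived
 * streaming mapping that the device writes into.  The buffer must be
 * handed back to the CPU before it is read, and back to the device
 * before the next transfer.
 *
 *	dma_sync_single_for_cpu(dev, dma, size, DMA_FROM_DEVICE);
 *	/- ... the CPU may now safely read the buffer ... -/
 *	dma_sync_single_for_device(dev, dma, size, DMA_FROM_DEVICE);
 *	/- ... the device may now DMA into the buffer again ... -/
 */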

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
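
/*
 * Illustrative sketch only: the attrs-less wrappers above are what most
 * drivers use.  "buf" is a hypothetical kmalloc()ed buffer; vmalloc()
 * or on-stack memory must not be passed to dma_map_single().
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	/- ... start the transfer, wait for completion ... -/
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */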

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
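
/*
 * Illustrative sketch only: forwarding a driver's file_operations mmap
 * handler to dma_mmap_coherent().  "my_state" and its dev/buf/dma/size
 * fields are hypothetical values stashed at allocation time.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct my_state *st = file->private_data;
 *
 *		return dma_mmap_coherent(st->dev, vma, st->buf,
 *					 st->dma, st->size);
 *	}
 */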

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#endif
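
/*
 * Editorial note: arch_dma_alloc_attrs() above is a pre-allocation
 * hook.  Architectures may redefine it to fix up the device and/or the
 * gfp flags before ops->alloc() runs, which is why dma_alloc_attrs()
 * passes &dev and &flag.  A false return makes the allocation fail.
 */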

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
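
/*
 * Illustrative sketch only: a typical coherent (consistent) allocation
 * for a long-lived descriptor ring.  RING_BYTES is a hypothetical size
 * constant; "ring_dma" is handed to the device and "ring" is the
 * matching CPU pointer.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	/- ... no dma_sync_*() calls are needed on this memory ... -/
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */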

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}
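
/*
 * Editorial note: unlike dma_alloc_coherent(), memory obtained from
 * dma_alloc_noncoherent() is only guaranteed to be consistent after an
 * explicit dma_cache_sync() between CPU and device accesses.  It lets
 * architectures hand out cheaper, cacheable memory to callers willing
 * to manage synchronization themselves.
 */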

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif
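
/*
 * Illustrative sketch only: the common probe-time pattern of asking for
 * a wide DMA mask and falling back to 32 bits if the platform refuses.
 *
 *	if (dma_set_mask(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask(dev, DMA_BIT_MASK(32)))
 *		return -EIO;	/- no usable DMA addressing -/
 */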

#endif /* _ASM_GENERIC_DMA_MAPPING_H */