#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp,
				struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
			      void *vaddr, dma_addr_t dma_handle,
			      struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			  void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
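
/*
 * Illustrative sketch only (not part of this header's API): an architecture
 * or IOMMU layer fills in a struct dma_map_ops instance and hands it back
 * from its get_dma_ops() implementation.  All "my_*" names below are
 * hypothetical.
 *
 *	static struct dma_map_ops my_dma_ops = {
 *		.alloc		= my_alloc,
 *		.free		= my_free,
 *		.map_page	= my_map_page,
 *		.unmap_page	= my_unmap_page,
 *		.map_sg		= my_map_sg,
 *		.unmap_sg	= my_unmap_sg,
 *		.mapping_error	= my_mapping_error,
 *		.dma_supported	= my_dma_supported,
 *	};
 */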

extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of DMA-dependent
 * code.  Code that depends on the dma-mapping API needs to set
 * 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
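
/*
 * Illustrative use of the streaming single-buffer API (a sketch; "dev",
 * "buf" and "len" are hypothetical driver state).  The buffer must come
 * from the kernel's linear mapping (e.g. kmalloc()), not from vmalloc()
 * or the stack, because the mapping above goes through virt_to_page().
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer and wait for it to complete ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */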

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
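
/*
 * Illustrative scatter/gather mapping (a sketch; "dev", "sgl" and "nents"
 * are hypothetical).  dma_map_sg() may merge entries, so the value it
 * returns can be smaller than nents and is what the hardware should be
 * programmed with, while the original nents is what dma_unmap_sg() takes.
 *
 *	int count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	... program the device with "count" segments ...
 *	dma_unmap_sg(dev, sgl, nents, DMA_FROM_DEVICE);
 */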

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
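
/*
 * Illustrative CPU access to a streaming mapping between transfers (a
 * sketch; "dev", "handle" and "len" are hypothetical).  Ownership has to
 * be handed back to the CPU before the data is touched, and returned to
 * the device before the next DMA starts.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	... CPU examines or updates the buffer ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */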

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}
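
/*
 * Illustrative use from a driver's mmap file operation (a sketch; the
 * "foo" driver, its private structure and fields are hypothetical).  The
 * buffer must have come from dma_alloc_attrs()/dma_alloc_coherent();
 * dma_mmap_coherent() below is the attr-less convenience wrapper.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_priv *priv = file->private_data;
 *
 *		return dma_mmap_coherent(priv->dev, vma, priv->cpu_addr,
 *					 priv->dma_handle, priv->size);
 *	}
 */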

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
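
/*
 * Illustrative coherent allocation and release (a sketch; "dev", "priv"
 * and BUF_SIZE are hypothetical).  The size and handle given back to
 * dma_free_coherent() must be the ones used at allocation time.
 *
 *	priv->buf = dma_alloc_coherent(dev, BUF_SIZE, &priv->handle,
 *				       GFP_KERNEL);
 *	if (!priv->buf)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, BUF_SIZE, priv->buf, priv->handle);
 */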

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
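
/*
 * Illustrative probe-time mask setup (a sketch; falling back from 64 to
 * 32 bits is a common but device-specific policy, not a rule).
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */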

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}
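
/*
 * Illustrative use of the segment parameters (a sketch; the limits shown
 * are hypothetical hardware constraints).  Both setters require
 * dev->dma_parms to have been allocated by the bus or driver code,
 * otherwise they return -EIO.
 *
 *	dma_set_max_seg_size(dev, SZ_1M);
 *	dma_set_seg_boundary(dev, SZ_4K - 1);
 */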

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define	DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
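
/*
 * Illustrative use of the unmap-state helpers (a sketch; "struct foo_buf"
 * and its users are hypothetical).  They let a driver carry the address
 * and length needed for unmapping only when CONFIG_NEED_DMA_MAP_STATE is
 * set, and compile away to nothing otherwise.
 *
 *	struct foo_buf {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(buf, mapping, handle);
 *	dma_unmap_len_set(buf, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(buf, mapping),
 *			 dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */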

#endif