#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

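/*
 * Illustrative sketch (not part of this header): an architecture or IOMMU
 * layer typically fills in a dma_map_ops table and hands it out via its
 * get_dma_ops() implementation.  All names below are hypothetical.
 *
 *	static struct dma_map_ops my_plat_dma_ops = {
 *		.alloc		= my_plat_dma_alloc,
 *		.free		= my_plat_dma_free,
 *		.map_page	= my_plat_map_page,
 *		.unmap_page	= my_plat_unmap_page,
 *		.map_sg		= my_plat_map_sg,
 *		.unmap_sg	= my_plat_unmap_sg,
 *		.mapping_error	= my_plat_mapping_error,
 *		.dma_supported	= my_plat_dma_supported,
 *	};
 */
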
extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator itself.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

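/*
 * Illustrative sketch (hypothetical driver code, not part of this header):
 * mapping a scatterlist with the dma_map_sg() wrapper defined below and
 * walking the mapped entries.  map_sg may coalesce entries, so only the
 * first 'mapped' entries carry valid DMA addresses, while unmapping must
 * still pass the original nents.  sgl and program_hw_desc() are assumed
 * driver-local names.
 *
 *	struct scatterlist *s;
 *	int i, mapped;
 *
 *	mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, mapped, i)
 *		program_hw_desc(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */
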
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

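/*
 * Illustrative sketch (hypothetical, not part of this header): a streaming
 * mapping that the CPU inspects between device transfers is handed back and
 * forth with the sync calls above.  buf, buf_dma, len and process_rx_data()
 * are assumed driver-local names.
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	process_rx_data(buf);		// CPU may look at the data here
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	// the device may DMA into the buffer again
 */
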
static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)

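/*
 * Illustrative sketch (hypothetical driver code): the usual single-buffer
 * streaming pattern built on the wrappers above.  The result should always
 * be checked with dma_mapping_error() (defined further down) before use.
 * start_device_tx() is an assumed driver helper.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	start_device_tx(handle, len);
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
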
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)

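/*
 * Illustrative sketch (hypothetical, not part of this header): a driver
 * exposing a coherent buffer to user space from its ->mmap() file operation.
 * my_dev, my_cpu_addr, my_handle and my_size are assumed driver state.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, my_cpu_addr,
 *					 my_handle, my_size);
 *	}
 */
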
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}

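/*
 * Illustrative sketch (hypothetical): a long-lived consistent buffer such as
 * a descriptor ring is typically allocated once at probe time and freed at
 * remove time.  ring, ring_bytes and the field names are made up for
 * illustration.
 *
 *	ring->desc = dma_alloc_coherent(dev, ring_bytes, &ring->desc_dma,
 *					GFP_KERNEL);
 *	if (!ring->desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_bytes, ring->desc, ring->desc_dma);
 */
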
static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

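/*
 * Illustrative sketch (hypothetical probe code): try a 64-bit mask first and
 * fall back to 32 bits, the common pattern for PCI-style devices.  pdev is an
 * assumed device pointer.
 *
 *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
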
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

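/*
 * Illustrative sketch (hypothetical bus/host-controller code): a controller
 * that only handles 16 KiB segments that never cross a 64 KiB boundary would
 * advertise that before drivers start mapping scatterlists.  dma_parms must
 * point at storage owned by the bus code; my_host is an assumed container.
 *
 *	dev->dma_parms = &my_host->dma_parms;
 *	dma_set_max_seg_size(dev, SZ_16K);
 *	dma_set_seg_boundary(dev, SZ_64K - 1);
 */
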
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

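/*
 * Illustrative sketch (hypothetical platform code): a device with an on-chip
 * SRAM window can steer its coherent allocations into it.  The addresses and
 * size are made up, and a zero return is treated as failure as in the stub
 * above.
 *
 *	if (!dma_declare_coherent_memory(dev, 0x10000000, 0x10000000, SZ_64K,
 *					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
 *		dev_warn(dev, "failed to declare coherent memory\n");
 */
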
/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

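/*
 * Illustrative sketch (hypothetical): the dmam_* variants are device-managed,
 * so the allocation below is released automatically when the driver detaches
 * and needs no explicit free in the error or remove paths.  ring, ring_bytes
 * and ring_dma are assumed driver-local names.
 *
 *	ring = dmam_alloc_coherent(dev, ring_bytes, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 */
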
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

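/*
 * Illustrative sketch (hypothetical frame-buffer style driver): write-combined
 * memory suits buffers the CPU mostly writes sequentially, such as scanout
 * buffers.  fb_virt, fb_dma and fb_size are assumed driver-local names.
 *
 *	fb_virt = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb_virt)
 *		return -ENOMEM;
 *	...
 *	dma_free_wc(dev, fb_size, fb_virt, fb_dma);
 */
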
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

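/*
 * Illustrative sketch (hypothetical driver struct): the macros above let a
 * driver keep unmap information only when the platform actually needs it
 * (CONFIG_NEED_DMA_MAP_STATE) and compile it away otherwise.  my_tx_desc,
 * desc, handle and size are assumed driver-local names.
 *
 *	struct my_tx_desc {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(desc, addr, handle);
 *	dma_unmap_len_set(desc, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */
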
#endif /* _LINUX_DMA_MAPPING_H */