#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-attrs.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <asm-generic/dma-coherent.h>

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
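
/*
 * Illustrative sketch (editorial addition, not part of the original header):
 * a hypothetical driver obtains a dma_addr_t from the streaming API defined
 * below and hands it to its hardware, while the CPU keeps using the kernel
 * virtual address.  All "foo"/"FOO" names are invented for illustration.
 *
 *	void *buf = kmalloc(FOO_BUF_SIZE, GFP_KERNEL);
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, FOO_BUF_SIZE, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		goto err;
 *	writel(lower_32_bits(handle), foo->regs + FOO_DMA_ADDR_LO);
 *	...
 *	dma_unmap_single(dev, handle, FOO_BUF_SIZE, DMA_TO_DEVICE);
 *	kfree(buf);		// the CPU only ever dereferences 'buf', never 'handle'
 */
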
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			struct dma_attrs *attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			struct dma_attrs *attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t, struct dma_attrs *attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, struct dma_attrs *attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			struct dma_attrs *attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			struct dma_attrs *attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			struct dma_attrs *attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			struct dma_attrs *attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};
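
/*
 * Illustrative sketch (editorial addition): a minimal, hypothetical
 * dma_map_ops backend for a platform with no IOMMU, where bus addresses
 * equal physical addresses.  Real implementations live under arch/ and in
 * IOMMU drivers; the "foo_" names are invented for illustration.
 *
 *	static dma_addr_t foo_map_page(struct device *dev, struct page *page,
 *			unsigned long offset, size_t size,
 *			enum dma_data_direction dir, struct dma_attrs *attrs)
 *	{
 *		return page_to_phys(page) + offset;
 *	}
 *
 *	static int foo_map_sg(struct device *dev, struct scatterlist *sgl,
 *			int nents, enum dma_data_direction dir,
 *			struct dma_attrs *attrs)
 *	{
 *		struct scatterlist *sg;
 *		int i;
 *
 *		for_each_sg(sgl, sg, nents, i) {
 *			sg->dma_address = sg_phys(sg);
 *			sg_dma_len(sg) = sg->length;
 *		}
 *		return nents;
 *	}
 *
 *	static struct dma_map_ops foo_dma_ops = {
 *		.map_page	= foo_map_page,
 *		.map_sg		= foo_map_sg,
 *		.is_phys	= 1,
 *	};
 */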

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
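
/*
 * Illustrative sketch (editorial addition): typical scatter-gather usage from
 * a hypothetical driver.  The mapping may coalesce entries, so the device is
 * programmed with the count returned by dma_map_sg(), while dma_unmap_sg() is
 * called with the original nents.  foo_hw_add_desc() is invented.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, sg, count, i)
 *		foo_hw_add_desc(foo, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */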

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, NULL)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, NULL)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, NULL)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, NULL)
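
/*
 * Illustrative sketch (editorial addition): if the CPU needs to look at a
 * streaming mapping while it stays mapped, it must bracket the access with
 * the sync calls above.  A hypothetical receive path might do:
 *
 *	handle = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	// ... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	foo_inspect_header(buf);		// CPU may read the data now
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	// ... hand the buffer back to the device ...
 *	dma_unmap_single(dev, handle, len, DMA_FROM_DEVICE);
 *
 * foo_inspect_header() is an invented placeholder.
 */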

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, NULL)
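
/*
 * Illustrative sketch (editorial addition): a hypothetical driver exporting a
 * coherent buffer to user space from its ->mmap() file operation; the "foo"
 * structure and fields are invented for illustration.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */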

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size, struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, NULL)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  struct dma_attrs *attrs)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, NULL);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, NULL);
}
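
/*
 * Illustrative sketch (editorial addition): a hypothetical driver allocating a
 * descriptor ring that both the CPU and the device access without explicit
 * sync calls, and freeing it on teardown.  FOO_RING_BYTES and the "foo"
 * fields are invented.
 *
 *	foo->ring = dma_alloc_coherent(dev, FOO_RING_BYTES,
 *				       &foo->ring_dma, GFP_KERNEL);
 *	if (!foo->ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, FOO_RING_BYTES, foo->ring, foo->ring_dma);
 */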

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	return dma_alloc_attrs(dev, size, dma_handle, gfp, &attrs);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	DEFINE_DMA_ATTRS(attrs);

	dma_set_attr(DMA_ATTR_NON_CONSISTENT, &attrs);
	dma_free_attrs(dev, size, cpu_addr, dma_handle, &attrs);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same value.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to the
 * same value as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}
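
/*
 * Illustrative sketch (editorial addition): a hypothetical probe routine
 * asking for 64-bit DMA and falling back to 32-bit if the platform cannot
 * provide it.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64))) {
 *		err = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
 *		if (err)
 *			return err;
 *	}
 */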

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifndef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif
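
/*
 * Illustrative sketch (editorial addition): a hypothetical platform driver
 * steering coherent allocations for its device into a chunk of on-chip
 * memory.  The call returns non-zero (one of the DMA_MEMORY_* flags above) on
 * success and 0 on failure; the FOO_SRAM_* names are invented.
 *
 *	if (!dma_declare_coherent_memory(dev, FOO_SRAM_PHYS, FOO_SRAM_BUS,
 *					 FOO_SRAM_SIZE,
 *					 DMA_MEMORY_MAP | DMA_MEMORY_EXCLUSIVE))
 *		return -ENOMEM;
 *	// subsequent dma_alloc_coherent(dev, ...) calls are satisfied from
 *	// this region
 */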

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY */

static inline void *dma_alloc_writecombine(struct device *dev, size_t size,
					   dma_addr_t *dma_addr, gfp_t gfp)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_alloc_attrs(dev, size, dma_addr, gfp, &attrs);
}

static inline void dma_free_writecombine(struct device *dev, size_t size,
					 void *cpu_addr, dma_addr_t dma_addr)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_free_attrs(dev, size, cpu_addr, dma_addr, &attrs);
}

static inline int dma_mmap_writecombine(struct device *dev,
					struct vm_area_struct *vma,
					void *cpu_addr, dma_addr_t dma_addr,
					size_t size)
{
	DEFINE_DMA_ATTRS(attrs);
	dma_set_attr(DMA_ATTR_WRITE_COMBINE, &attrs);
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, &attrs);
}

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif
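
/*
 * Illustrative sketch (editorial addition): the macros above let a driver
 * keep unmap bookkeeping in its own structures at zero cost when the platform
 * does not need it; the "foo" names are invented for illustration.
 *
 *	struct foo_tx_desc {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	// at map time:
 *	dma_unmap_addr_set(desc, addr, handle);
 *	dma_unmap_len_set(desc, len, size);
 *
 *	// at completion time:
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */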

#endif