#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)
/*
 * DMA_ATTR_STRONGLY_ORDERED: Specifies that accesses to the mapping must
 * not be buffered, reordered, merged with other accesses, or unaligned.
 * No speculative access may occur in this mapping.
 */
#define DMA_ATTR_STRONGLY_ORDERED	(1UL << 9)
/*
 * DMA_ATTR_SKIP_ZEROING: Do not zero the mapping.
 */
#define DMA_ATTR_SKIP_ZEROING		(1UL << 10)
/*
 * DMA_ATTR_NO_DELAYED_UNMAP: Used by MSM-specific lazy mapping to indicate
 * that the mapping can be freed on unmap, rather than when the ion_buffer
 * is freed.
 */
#define DMA_ATTR_NO_DELAYED_UNMAP	(1UL << 11)
/*
 * DMA_ATTR_EXEC_MAPPING: The mapping has executable permissions.
 */
#define DMA_ATTR_EXEC_MAPPING		(1UL << 12)
/*
 * DMA_ATTR_IOMMU_USE_UPSTREAM_HINT: Normally an SMMU will override any bus
 * attributes (i.e. cacheability) provided by the client device. Some hardware
 * may be designed to use the original attributes instead.
 */
#define DMA_ATTR_IOMMU_USE_UPSTREAM_HINT	(1UL << 13)
/*
 * When passed to a DMA map call the DMA_ATTR_FORCE_COHERENT DMA
 * attribute can be used to force a buffer to be mapped as IO coherent.
 */
#define DMA_ATTR_FORCE_COHERENT		(1UL << 14)
/*
 * When passed to a DMA map call the DMA_ATTR_FORCE_NON_COHERENT DMA
 * attribute can be used to force a buffer to not be mapped as IO
 * coherent.
 */
#define DMA_ATTR_FORCE_NON_COHERENT	(1UL << 15)
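/*
 * Illustrative sketch, not part of the definitions above: the DMA
 * attributes form a bitmask and may be combined when passed to the
 * *_attrs variants of the mapping calls (dev, buf and size below are
 * hypothetical driver-local names), e.g.:
 *
 *	unsigned long attrs = DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_NO_WARN;
 *	dma_addr_t addr = dma_map_single_attrs(dev, buf, size,
 *					       DMA_TO_DEVICE, attrs);
 */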

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
	int (*set_dma_mask)(struct device *dev, u64 mask);
	void *(*remap)(struct device *dev, void *cpu_addr, dma_addr_t handle,
		       size_t size, unsigned long attrs);
	void (*unremap)(struct device *dev, void *remapped_address,
			size_t size);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

extern struct dma_map_ops dma_noop_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_coherent(struct device *dev, ssize_t size,
			    dma_addr_t *dma_handle, void **ret);
int dma_release_from_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_coherent(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, size_t size, int *ret);
#else
#define dma_alloc_from_coherent(dev, size, handle, ret) (0)
#define dma_release_from_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_coherent(dev, vma, vaddr, order, ret) (0)
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code. Code that depends on the DMA-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern struct dma_map_ops bad_dma_ops;
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, 0);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, 0);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
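/*
 * Illustrative sketch of streaming-API usage (dev, buf and len are
 * hypothetical driver-local names, not part of this header): every
 * mapping should be checked with dma_mapping_error() and released with
 * the matching unmap call once the DMA has completed, e.g.:
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */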

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags,
			   bool nowarn);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
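/*
 * Illustrative sketch (foo_mmap, foo_dev and its fields are hypothetical
 * driver code): a driver's mmap file operation can hand a previously
 * allocated coherent buffer to user space with dma_mmap_coherent():
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 */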

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}
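/*
 * Illustrative sketch (dev, size, cpu_addr and dma_handle are
 * hypothetical driver-local names): dma_alloc_coherent() returns NULL
 * on failure, and the buffer must be released with dma_free_coherent()
 * using the same size and handle, e.g.:
 *
 *	cpu_addr = dma_alloc_coherent(dev, size, &dma_handle, GFP_KERNEL);
 *	if (!cpu_addr)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, size, cpu_addr, dma_handle);
 */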

static inline void *dma_alloc_noncoherent(struct device *dev, size_t size,
					  dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			       DMA_ATTR_NON_CONSISTENT);
}

static inline void dma_free_noncoherent(struct device *dev, size_t size,
					void *cpu_addr, dma_addr_t dma_handle)
{
	dma_free_attrs(dev, size, cpu_addr, dma_handle,
		       DMA_ATTR_NON_CONSISTENT);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (get_dma_ops(dev)->mapping_error)
		return get_dma_ops(dev)->mapping_error(dev, dma_addr);

#ifdef DMA_ERROR_CODE
	return dma_addr == DMA_ERROR_CODE;
#else
	return 0;
#endif
}

#ifndef HAVE_ARCH_DMA_SUPPORTED
static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}
#endif

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->set_dma_mask)
		return ops->set_dma_mask(dev, mask);

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline void *dma_remap(struct device *dev, void *cpu_addr,
			      dma_addr_t dma_handle, size_t size,
			      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->remap) {
		WARN_ONCE(1, "Remap function not implemented for %pS\n",
			  ops->remap);
		return NULL;
	}

	return ops->remap(dev, cpu_addr, dma_handle, size, attrs);
}

static inline void dma_unremap(struct device *dev, void *remapped_addr,
			       size_t size)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops->unremap) {
		WARN_ONCE(1, "unremap function not implemented for %pS\n",
			  ops->unremap);
		return;
	}

	return ops->unremap(dev, remapped_addr, size);
}

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}
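/*
 * Illustrative sketch (hypothetical probe code): a driver typically asks
 * for the widest mask it can handle and falls back to 32 bits, e.g.:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */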

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_MAP			0x01
#define DMA_MEMORY_IO			0x02
#define DMA_MEMORY_INCLUDES_CHILDREN	0x04
#define DMA_MEMORY_EXCLUSIVE		0x08

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return 0;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_noncoherent(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_noncoherent(struct device *dev, size_t size, void *vaddr,
				  dma_addr_t dma_handle);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

static inline void *dma_alloc_nonconsistent(struct device *dev, size_t size,
					    dma_addr_t *dma_handle, gfp_t flag)
{
	unsigned long attrs = DMA_ATTR_NON_CONSISTENT;

	return dma_alloc_attrs(dev, size, dma_handle, flag, attrs);
}

static inline void dma_free_nonconsistent(struct device *dev, size_t size,
					  void *cpu_addr, dma_addr_t dma_handle)
{
	unsigned long attrs = DMA_ATTR_NON_CONSISTENT;

	return dma_free_attrs(dev, size, cpu_addr, dma_handle, attrs);
}

static inline int dma_mmap_nonconsistent(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	unsigned long attrs = DMA_ATTR_NON_CONSISTENT;

	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size, attrs);
}

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
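/*
 * Illustrative sketch (struct ring_entry and its fields are hypothetical):
 * a driver keeps its unmap bookkeeping in its own state with the macros
 * above so the fields compile away when they are not needed, e.g.:
 *
 *	struct ring_entry {
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(e, mapping, busaddr);
 *	dma_unmap_len_set(e, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(e, mapping),
 *			 dma_unmap_len(e, len), DMA_TO_DEVICE);
 */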

#endif