#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
		dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
		size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
		dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
		void *cpu_addr, size_t size, int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
		const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the dma api to allow compilation but not linking of
 * dma dependent code. Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size,
		enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			offset_in_page(ptr), size,
			dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			offset_in_page(ptr), size,
			dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size,
		enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

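/*
 * Illustrative sketch, not part of the original header: a typical caller maps
 * a scatterlist once, programs the device with the (possibly smaller) number
 * of entries the mapping returned, and unmaps with the *original* nents.
 * The function and variable names below are made up for the example.
 */
static inline int dma_example_map_rx_sglist(struct device *dev,
		struct scatterlist *sgl, int nents)
{
	int mapped = dma_map_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);

	if (mapped == 0)
		return -ENOMEM;	/* mapping failed, nothing to unmap */

	/* ... hand 'mapped' entries to the device, wait for completion ... */

	dma_unmap_sg_attrs(dev, sgl, nents, DMA_FROM_DEVICE, 0);
	return 0;
}
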
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page,
		size_t offset, size_t size,
		enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
		dma_addr_t addr, size_t size,
		enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr,
		size_t size,
		enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr,
		unsigned long offset,
		size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr,
		unsigned long offset,
		size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

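/*
 * Illustrative sketch, not part of the original header: a driver that wants
 * to inspect a streaming DMA_FROM_DEVICE buffer on the CPU while keeping the
 * mapping alive brackets the access with the two sync calls.  The function
 * name is hypothetical.
 */
static inline void dma_example_peek_rx_buffer(struct device *dev,
		dma_addr_t addr, size_t size)
{
	/* give ownership of the buffer (and its cache lines) to the CPU */
	dma_sync_single_for_cpu(dev, addr, size, DMA_FROM_DEVICE);

	/* ... the CPU may now safely read the buffer contents ... */

	/* hand the buffer back to the device before it DMAs again */
	dma_sync_single_for_device(dev, addr, size, DMA_FROM_DEVICE);
}
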
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->cache_sync)
		ops->cache_sync(dev, vaddr, size, dir);
}

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
		unsigned long vm_flags,
		pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
		unsigned long vm_flags, pgprot_t prot,
		const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space. The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

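/*
 * Illustrative sketch, not part of the original header: a driver's mmap()
 * file operation can expose a buffer previously obtained from
 * dma_alloc_coherent() to user space with dma_mmap_coherent().  "cpu_addr",
 * "dma_addr" and "size" are assumed to come from that earlier allocation;
 * the function name is hypothetical.
 */
static inline int dma_example_mmap_coherent(struct device *dev,
		struct vm_area_struct *vma, void *cpu_addr,
		dma_addr_t dma_addr, size_t size)
{
	/* refuse user mappings larger than the underlying allocation */
	if (vma->vm_end - vma->vm_start > PAGE_ALIGN(size))
		return -EINVAL;

	/* the buffer must stay allocated until the user mapping goes away */
	return dma_mmap_coherent(dev, vma, cpu_addr, dma_addr, size);
}
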
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
				attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

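/*
 * Illustrative sketch, not part of the original header: allocating a small
 * coherent descriptor ring at probe time.  The function and variable names
 * are hypothetical.
 */
static inline void *dma_example_alloc_ring(struct device *dev, size_t bytes,
		dma_addr_t *ring_dma)
{
	/* GFP_KERNEL may sleep; use GFP_ATOMIC when called from atomic context */
	void *ring = dma_alloc_coherent(dev, bytes, ring_dma, GFP_KERNEL);

	/* callers release it with dma_free_coherent(dev, bytes, ring, *ring_dma) */
	return ring;
}
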
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

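/*
 * Illustrative sketch, not part of the original header: the usual pattern for
 * a streaming mapping is to map, immediately check the handle with
 * dma_mapping_error(), and only then program the device.  Names below are
 * hypothetical.
 */
static inline int dma_example_map_tx_buffer(struct device *dev, void *buf,
		size_t len, dma_addr_t *handle)
{
	*handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, *handle))
		return -ENOMEM;

	/* ... give *handle to the hardware, unmap once the transfer is done ... */
	return 0;
}
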
static inline void dma_check_mask(struct device *dev, u64 mask)
{
	if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
		dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;

	dma_check_mask(dev, mask);

	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately setup.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

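/*
 * Illustrative sketch, not part of the original header: a typical probe()
 * first asks for a 64-bit DMA mask and falls back to 32 bits if the platform
 * (or dma_supported()) rejects it.  The function name is hypothetical.
 */
static inline int dma_example_set_masks(struct device *dev)
{
	if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
		return 0;

	/* fall back: the device will then only be handed 32-bit addresses */
	return dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32));
}
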
extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu,
		bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
		unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
			flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
		dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
		dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp,
		unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr,
		dma_addr_t device_addr, size_t size,
		int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
		phys_addr_t phys_addr, dma_addr_t device_addr,
		size_t size, gfp_t gfp)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
		dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
		struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr,
		size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif

#endif /* _LINUX_DMA_MAPPING_H */