/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER          (1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING          (1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE          (1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT         (1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING      (1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC          (1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS       (1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES     (1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similar to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN                (1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: Used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED             (1UL << 9)

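/*
 * Illustrative usage (not part of this header): DMA attributes are passed
 * as a bitwise OR in the 'attrs' argument of the *_attrs variants of the
 * DMA API, and the same attributes must be passed again when the mapping
 * or allocation is torn down.  A hypothetical driver asking for a
 * write-combined buffer without allocation-failure warnings might do:
 *
 *      unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *      void *buf;
 *      dma_addr_t dma;
 *
 *      buf = dma_alloc_attrs(dev, SZ_64K, &dma, GFP_KERNEL, attrs);
 *      if (!buf)
 *              return -ENOMEM;
 *      ...
 *      dma_free_attrs(dev, SZ_64K, buf, dma, attrs);
 */
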
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
        void* (*alloc)(struct device *dev, size_t size,
                        dma_addr_t *dma_handle, gfp_t gfp,
                        unsigned long attrs);
        void (*free)(struct device *dev, size_t size,
                        void *vaddr, dma_addr_t dma_handle,
                        unsigned long attrs);
        int (*mmap)(struct device *, struct vm_area_struct *,
                        void *, dma_addr_t, size_t,
                        unsigned long attrs);

        int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
                        dma_addr_t, size_t, unsigned long attrs);

        dma_addr_t (*map_page)(struct device *dev, struct page *page,
                        unsigned long offset, size_t size,
                        enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        /*
         * map_sg returns 0 on error and a value > 0 on success.
         * It should never return a value < 0.
         */
        int (*map_sg)(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_sg)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir,
                        unsigned long attrs);
        dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
                        size_t size, enum dma_data_direction dir,
                        unsigned long attrs);
        void (*sync_single_for_cpu)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_single_for_device)(struct device *dev,
                        dma_addr_t dma_handle, size_t size,
                        enum dma_data_direction dir);
        void (*sync_sg_for_cpu)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir);
        void (*sync_sg_for_device)(struct device *dev,
                        struct scatterlist *sg, int nents,
                        enum dma_data_direction dir);
        int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
        int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
        u64 (*get_required_mask)(struct device *dev);
#endif
        int is_phys;
};

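/*
 * Illustrative sketch (not part of this header): the rough shape of a
 * trivial direct-mapped dma_map_ops, similar in spirit to dma_noop_ops.
 * The example_* names are hypothetical; a real implementation also has to
 * provide whatever other callbacks its platform needs.
 *
 *      static dma_addr_t example_map_page(struct device *dev,
 *                      struct page *page, unsigned long offset, size_t size,
 *                      enum dma_data_direction dir, unsigned long attrs)
 *      {
 *              return page_to_phys(page) + offset;
 *      }
 *
 *      static int example_map_sg(struct device *dev, struct scatterlist *sgl,
 *                      int nents, enum dma_data_direction dir,
 *                      unsigned long attrs)
 *      {
 *              struct scatterlist *sg;
 *              int i;
 *
 *              for_each_sg(sgl, sg, nents, i) {
 *                      sg_dma_address(sg) = sg_phys(sg);
 *                      sg_dma_len(sg) = sg->length;
 *              }
 *              return nents;
 *      }
 *
 *      const struct dma_map_ops example_direct_ops = {
 *              .map_page = example_map_page,
 *              .map_sg   = example_map_sg,
 *      };
 */
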
extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

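/*
 * For example, DMA_BIT_MASK(32) evaluates to 0x00000000ffffffffULL and
 * DMA_BIT_MASK(24) to 0x0000000000ffffffULL.  The (n) == 64 special case
 * avoids the undefined behaviour of shifting a 64-bit value by 64 bits.
 */
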
#define DMA_MASK_NONE   0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
        return ((dma_direction == DMA_BIDIRECTIONAL) ||
                (dma_direction == DMA_TO_DEVICE) ||
                (dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
        return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These functions are only for the generic DMA-coherent allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
                dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
                size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
                dma_addr_t *dma_handle)
{
        return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
        return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
                void *cpu_addr, size_t size,
                int *ret)
{
        return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        if (dev && dev->dma_ops)
                return dev->dma_ops;
        return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
                const struct dma_map_ops *dma_ops)
{
        dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of
 * DMA-dependent code.  Code that depends on the dma-mapping
 * API needs to set 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
        return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
                size_t size,
                enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(ptr, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, virt_to_page(ptr),
                             offset_in_page(ptr), size,
                             dir, attrs);
        debug_dma_map_page(dev, virt_to_page(ptr),
                           offset_in_page(ptr), size,
                           dir, addr, true);
        return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
                size_t size,
                enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, true);
}

/*
 * dma_map_sg_attrs() returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        int i, ents;
        struct scatterlist *s;

        for_each_sg(sg, s, nents, i)
                kmemcheck_mark_initialized(sg_virt(s), s->length);
        BUG_ON(!valid_dma_direction(dir));
        ents = ops->map_sg(dev, sg, nents, dir, attrs);
        BUG_ON(ents < 0);
        debug_dma_map_sg(dev, sg, nents, ents, dir);

        return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
                int nents, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        debug_dma_unmap_sg(dev, sg, nents, dir);
        if (ops->unmap_sg)
                ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
                struct page *page,
                size_t offset, size_t size,
                enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        kmemcheck_mark_initialized(page_address(page) + offset, size);
        BUG_ON(!valid_dma_direction(dir));
        addr = ops->map_page(dev, page, offset, size, dir, attrs);
        debug_dma_map_page(dev, page, offset, size, dir, addr, false);

        return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
                dma_addr_t addr, size_t size,
                enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_page)
                ops->unmap_page(dev, addr, size, dir, attrs);
        debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
                phys_addr_t phys_addr,
                size_t size,
                enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        dma_addr_t addr;

        BUG_ON(!valid_dma_direction(dir));

        /* Don't allow RAM to be mapped */
        BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

        addr = phys_addr;
        if (ops->map_resource)
                addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

        debug_dma_map_resource(dev, phys_addr, size, dir, addr);

        return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
                size_t size, enum dma_data_direction dir,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->unmap_resource)
                ops->unmap_resource(dev, addr, size, dir, attrs);
        debug_dma_unmap_resource(dev, addr, size, dir);
}

static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
                size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr, size, dir);
        debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t addr, size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr, size, dir);
        debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t addr,
                unsigned long offset,
                size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t addr,
                unsigned long offset,
                size_t size,
                enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(dev, addr + offset, size, dir);
        debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                    int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                       int nelems, enum dma_data_direction dir)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(dir));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(dev, sg, nelems, dir);
        debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

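/*
 * Illustrative usage (not part of this header): a typical streaming-DMA
 * sequence in a hypothetical driver.  Every mapping must be checked with
 * dma_mapping_error() and eventually unmapped; dma_sync_single_for_cpu()
 * and dma_sync_single_for_device() hand buffer ownership back and forth
 * while the mapping stays alive.  start_rx(), wait_for_rx_complete() and
 * process() are placeholders for device-specific code.
 *
 *      dma_addr_t dma;
 *
 *      dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *      if (dma_mapping_error(dev, dma))
 *              return -ENOMEM;
 *
 *      start_rx(hw, dma, len);
 *      wait_for_rx_complete(hw);
 *
 *      dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *      process(buf, len);
 *      dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 *      ...
 *      dma_unmap_single(dev, dma, len, DMA_FROM_DEVICE);
 */
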
extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
                unsigned long vm_flags,
                pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
                unsigned long vm_flags, pgprot_t prot,
                const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
               dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->mmap)
                return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
        return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

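/*
 * Illustrative usage (not part of this header): a hypothetical driver
 * exposing a coherent buffer through its mmap() file operation.  foo_dev
 * and its fields are placeholders; cpu_addr, dma_handle and size must be
 * the values from the original dma_alloc_coherent() call.
 *
 *      static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              struct foo_dev *foo = file->private_data;
 *
 *              return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *                                       foo->dma_handle, foo->size);
 *      }
 */
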
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
                void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
                dma_addr_t dma_addr, size_t size,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        BUG_ON(!ops);
        if (ops->get_sgtable)
                return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
                                        attrs);
        return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag) (true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);
        void *cpu_addr;

        BUG_ON(!ops);

        if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
                return cpu_addr;

        if (!arch_dma_alloc_attrs(&dev, &flag))
                return NULL;
        if (!ops->alloc)
                return NULL;

        cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
        debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
        return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle,
                unsigned long attrs)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        BUG_ON(!ops);
        WARN_ON(irqs_disabled());

        if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
                return;

        if (!ops->free || !cpu_addr)
                return;

        debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
        ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag)
{
        return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_handle)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

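/*
 * Illustrative usage (not part of this header): allocating a small
 * descriptor ring from coherent memory in a hypothetical probe()/remove()
 * pair.  priv, pdev and RING_BYTES are placeholders.  Because the memory
 * is coherent no dma_sync_*() calls are needed, but CPU/device ordering
 * (e.g. a wmb() before telling the hardware about new descriptors) is
 * still the driver's responsibility.
 *
 *      priv->ring = dma_alloc_coherent(&pdev->dev, RING_BYTES,
 *                                      &priv->ring_dma, GFP_KERNEL);
 *      if (!priv->ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(&pdev->dev, RING_BYTES, priv->ring, priv->ring_dma);
 */
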
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        debug_dma_mapping_error(dev, dma_addr);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);
        return 0;
}

static inline void dma_check_mask(struct device *dev, u64 mask)
{
        if (sme_active() && (mask < (((u64)sme_get_me_mask() << 1) - 1)))
                dev_warn(dev, "SME is active, device will require DMA bounce buffers\n");
}

static inline int dma_supported(struct device *dev, u64 mask)
{
        const struct dma_map_ops *ops = get_dma_ops(dev);

        if (!ops)
                return 0;
        if (!ops->dma_supported)
                return 1;
        return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
        if (!dev->dma_mask || !dma_supported(dev, mask))
                return -EIO;

        dma_check_mask(dev, mask);

        *dev->dma_mask = mask;
        return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
        if (dev && dev->dma_mask && *dev->dma_mask)
                return *dev->dma_mask;
        return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
        if (!dma_supported(dev, mask))
                return -EIO;

        dma_check_mask(dev, mask);

        dev->coherent_dma_mask = mask;
        return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as, or a smaller value than, the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
        int rc = dma_set_mask(dev, mask);
        if (rc == 0)
                dma_set_coherent_mask(dev, mask);
        return rc;
}

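/*
 * Illustrative usage (not part of this header): typical probe-time mask
 * negotiation in a hypothetical PCI driver, falling back from 64-bit to
 * 32-bit DMA:
 *
 *      if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
 *              err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
 *              if (err) {
 *                      dev_err(&pdev->dev, "no usable DMA configuration\n");
 *                      return err;
 *              }
 *      }
 */
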
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
        dev->dma_mask = &dev->coherent_dma_mask;
        return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
                u64 size, const struct iommu_ops *iommu,
                bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->max_segment_size)
                return dev->dma_parms->max_segment_size;
        return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
                unsigned int size)
{
        if (dev->dma_parms) {
                dev->dma_parms->max_segment_size = size;
                return 0;
        }
        return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
        if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
                return dev->dma_parms->segment_boundary_mask;
        return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
        if (dev->dma_parms) {
                dev->dma_parms->segment_boundary_mask = mask;
                return 0;
        }
        return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
        return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t flag)
{
        void *ret = dma_alloc_coherent(dev, size, dma_handle,
                                       flag | __GFP_ZERO);
        return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
        return ARCH_DMA_MINALIGN;
#endif
        return 1;
}
#endif

/* Flags for the coherent memory API */
#define DMA_MEMORY_EXCLUSIVE            0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
                dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size, int flags)
{
        return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
                dma_addr_t device_addr, size_t size)
{
        return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

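/*
 * Illustrative usage (not part of this header): a hypothetical platform
 * driver dedicating a device-local SRAM region to coherent allocations,
 * so that later dma_alloc_coherent() calls for this device are satisfied
 * from that region.  sram_phys is the CPU physical address, sram_bus the
 * address the device uses, and SRAM_SIZE a placeholder for the size:
 *
 *      err = dma_declare_coherent_memory(&pdev->dev, sram_phys, sram_bus,
 *                                        SRAM_SIZE, DMA_MEMORY_EXCLUSIVE);
 *      if (err)
 *              return err;
 */
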
#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
        return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
                dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
                dma_addr_t *dma_handle, gfp_t gfp,
                unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
                phys_addr_t phys_addr,
                dma_addr_t device_addr, size_t size,
                int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
                phys_addr_t phys_addr, dma_addr_t device_addr,
                size_t size, int flags)
{
        return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
                dma_addr_t *dma_addr, gfp_t gfp)
{
        return dma_alloc_attrs(dev, size, dma_addr, gfp,
                               DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
                void *cpu_addr, dma_addr_t dma_addr)
{
        return dma_free_attrs(dev, size, cpu_addr, dma_addr,
                              DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
                struct vm_area_struct *vma,
                void *cpu_addr, dma_addr_t dma_addr,
                size_t size)
{
        return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
                              DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

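/*
 * Illustrative usage (not part of this header): write-combined allocations
 * are typically used for frame buffers and other CPU-write-mostly buffers.
 * A hypothetical fbdev-style driver might do:
 *
 *      void *fb_virt;
 *      dma_addr_t fb_dma;
 *
 *      fb_virt = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *      if (!fb_virt)
 *              return -ENOMEM;
 *      ...
 *      dma_free_wc(dev, fb_size, fb_virt, fb_dma);
 *
 * and use dma_mmap_wc() from its mmap hook to give user space the same
 * write-combined mapping.
 */
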
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
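/*
 * Illustrative usage (not part of this header): these macros let a driver
 * keep the address and length needed for unmapping only on platforms that
 * actually need them.  A hypothetical per-buffer descriptor:
 *
 *      struct foo_tx_buf {
 *              struct sk_buff *skb;
 *              DEFINE_DMA_UNMAP_ADDR(addr);
 *              DEFINE_DMA_UNMAP_LEN(len);
 *      };
 *
 *      dma_unmap_addr_set(buf, addr, mapping);
 *      dma_unmap_len_set(buf, len, skb->len);
 *      ...
 *      dma_unmap_single(dev, dma_unmap_addr(buf, addr),
 *                       dma_unmap_len(buf, len), DMA_TO_DEVICE);
 */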

#endif