/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN		(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

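/*
 * Illustrative sketch (not part of the API itself): drivers normally pass a
 * combination of the attributes above through the *_attrs variants of the
 * mapping calls declared later in this header. The names dev, buf and len
 * below are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				      DMA_ATTR_NO_WARN);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */
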
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target. A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
			void *vaddr, dma_addr_t dma_handle,
			unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			void *, dma_addr_t, size_t,
			unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			unsigned long offset, size_t size,
			enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir,
			unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir,
			unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
			dma_addr_t dma_handle, size_t size,
			enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
			struct scatterlist *sg, int nents,
			enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
	size_t (*max_mapping_size)(struct device *dev);
};

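/*
 * Illustrative sketch, not a real implementation: an IOMMU or bus layer
 * typically provides a const instance of the ops above and installs it with
 * set_dma_ops() (declared below under CONFIG_HAS_DMA). All my_* names are
 * hypothetical.
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.alloc		= my_bus_dma_alloc,
 *		.free		= my_bus_dma_free,
 *		.map_page	= my_bus_dma_map_page,
 *		.unmap_page	= my_bus_dma_unmap_page,
 *		.map_sg		= my_bus_dma_map_sg,
 *		.unmap_sg	= my_bus_dma_unmap_sg,
 *	};
 *
 *	set_dma_ops(dev, &my_bus_dma_ops);
 */
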
#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
/*
 * These three functions are only for use by the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
	return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call
 * bypass, and must not be used directly by drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);
dma_addr_t dma_direct_map_resource(struct device *dev, phys_addr_t paddr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

size_t dma_direct_max_mapping_size(struct device *dev);

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>

static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev->bus);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_is_direct(ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

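/*
 * Illustrative sketch of scatter-gather mapping with the wrappers above;
 * the sg_table setup is assumed to have been done elsewhere and the names
 * dev and sgt are hypothetical. The mapped count returned may be smaller
 * than nents, but the original nents must be passed back to the unmap call.
 *
 *	int mapped = dma_map_sg_attrs(dev, sgt->sgl, sgt->nents,
 *				      DMA_TO_DEVICE, 0);
 *	if (!mapped)
 *		return -ENOMEM;
 *	... hand the mapped entries to the hardware ...
 *	dma_unmap_sg_attrs(dev, sgt->sgl, sgt->nents, DMA_TO_DEVICE, 0);
 */
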
static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	if (WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	if (dma_is_direct(ops))
		addr = dma_direct_map_resource(dev, phys_addr, size, dir, attrs);
	else if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);
	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (!dma_is_direct(ops) && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

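/*
 * Illustrative sketch: dma_map_resource() is intended for physical addresses
 * that are not RAM, e.g. a device FIFO register used as the target of a
 * slave-DMA transfer. The names dev and phys are hypothetical.
 *
 *	dma_addr_t dma = dma_map_resource(dev, phys, SZ_4K,
 *					  DMA_BIDIRECTIONAL, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_resource(dev, dma, SZ_4K, DMA_BIDIRECTIONAL, 0);
 */
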
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs);
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
		dma_addr_t dma_handle);
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir);
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);
u64 dma_get_required_mask(struct device *dev);
size_t dma_max_mapping_size(struct device *dev);
#else /* CONFIG_HAS_DMA */
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
		struct page *page, size_t offset, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	return 0;
}
static inline void dma_unmap_sg_attrs(struct device *dev,
		struct scatterlist *sg, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline dma_addr_t dma_map_resource(struct device *dev,
		phys_addr_t phys_addr, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	return DMA_MAPPING_ERROR;
}
static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nelems, enum dma_data_direction dir)
{
}
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	return -ENOMEM;
}
static inline void *dma_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag, unsigned long attrs)
{
	return NULL;
}
static inline void dma_free_attrs(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
}
static inline void *dmam_alloc_attrs(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
	return NULL;
}
static inline void dmam_free_coherent(struct device *dev, size_t size,
		void *vaddr, dma_addr_t dma_handle)
{
}
static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir)
{
}
static inline int dma_get_sgtable_attrs(struct device *dev,
		struct sg_table *sgt, void *cpu_addr, dma_addr_t dma_addr,
		size_t size, unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	return -ENXIO;
}
static inline int dma_supported(struct device *dev, u64 mask)
{
	return 0;
}
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	return -EIO;
}
static inline u64 dma_get_required_mask(struct device *dev)
{
	return 0;
}
static inline size_t dma_max_mapping_size(struct device *dev)
{
	return 0;
}
#endif /* CONFIG_HAS_DMA */

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	debug_dma_map_single(dev, ptr, size);
	return dma_map_page_attrs(dev, virt_to_page(ptr), offset_in_page(ptr),
			size, dir, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_page_attrs(dev, addr, size, dir, attrs);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

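/*
 * Illustrative sketch: when the CPU needs to look at part of a buffer that
 * stays mapped for streaming DMA, ownership must be bounced with the sync
 * helpers above. The names dev, handle and hdr_len are hypothetical.
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, hdr_len, DMA_FROM_DEVICE);
 *	... the CPU may now read the first hdr_len bytes ...
 *	dma_sync_single_range_for_device(dev, handle, 0, hdr_len,
 *					 DMA_FROM_DEVICE);
 */
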
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)
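
/*
 * Illustrative sketch of the common streaming-DMA pattern using the
 * attr-less wrappers above; the names dev, buf and len are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... program the device with 'handle' and wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */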

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

int dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

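/*
 * Illustrative sketch: coherent allocations return both a kernel virtual
 * address and a device-visible handle, and both must be passed back when
 * freeing. The names dev, ring and RING_SIZE are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_SIZE, ring, ring_dma);
 */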

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same as or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

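/*
 * Illustrative sketch of typical probe-time usage; falling back to a 32-bit
 * mask when the wider mask is rejected is a common pattern. The err_no_dma
 * label is hypothetical.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		goto err_no_dma;
 */
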
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

#ifdef CONFIG_ARCH_HAS_SETUP_DMA_OPS
void arch_setup_dma_ops(struct device *dev, u64 dma_base, u64 size,
		const struct iommu_ops *iommu, bool coherent);
#else
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
		u64 size, const struct iommu_ops *iommu, bool coherent)
{
}
#endif /* CONFIG_ARCH_HAS_SETUP_DMA_OPS */

#ifdef CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS
void arch_teardown_dma_ops(struct device *dev);
#else
static inline void arch_teardown_dma_ops(struct device *dev)
{
}
#endif /* CONFIG_ARCH_HAS_TEARDOWN_DMA_OPS */

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

#ifdef CONFIG_DMA_DECLARE_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size);
void dma_release_declared_memory(struct device *dev);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_DMA_DECLARE_COHERENT */

static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dmam_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

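/*
 * Illustrative sketch (hypothetical driver structure): the macros above let a
 * driver keep the address and length needed for unmapping without paying the
 * storage cost on configurations where unmapping is a no-op.
 *
 *	struct my_ring_entry {
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(entry, mapping, handle);
 *	dma_unmap_len_set(entry, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(entry, mapping),
 *			 dma_unmap_len(entry, len), DMA_TO_DEVICE);
 */
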
#endif /* _LINUX_DMA_MAPPING_H */