/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/bug.h>
#include <linux/mem_encrypt.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is, reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

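/*
 * Illustrative sketch only (not part of this header): the attributes above
 * form a bitmask, so they can be combined and passed to the *_attrs variants
 * of the mapping functions declared later in this file.  For example, a
 * hypothetical driver mapping a buffer it has already synchronized itself
 * might do:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single_attrs(dev, buf, len, DMA_TO_DEVICE,
 *				      DMA_ATTR_SKIP_CPU_SYNC |
 *				      DMA_ATTR_NO_WARN);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *
 * "dev", "buf" and "len" are assumed driver-local names.
 */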
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void* (*alloc)(struct device *dev, size_t size,
			dma_addr_t *dma_handle, gfp_t gfp,
			unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		      void *vaddr, dma_addr_t dma_handle,
		      unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
			  void *, dma_addr_t, size_t,
			  unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	void (*cache_sync)(struct device *dev, void *vaddr, size_t size,
			enum dma_data_direction direction);
	int (*dma_supported)(struct device *dev, u64 mask);
	u64 (*get_required_mask)(struct device *dev);
};

#define DMA_MAPPING_ERROR		(~(dma_addr_t)0)

extern const struct dma_map_ops dma_virt_ops;
extern const struct dma_map_ops dma_dummy_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These functions are for the DMA-coherent allocator only.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation of DMA-dependent code.
 * Code that depends on the dma-mapping API needs to set 'depends on HAS_DMA'
 * in its Kconfig, unless it already depends on <something> || COMPILE_TEST,
 * where <something> guarantees the availability of the dma-mapping API.
 */
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return NULL;
}
#endif

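/*
 * Illustrative sketch only (assumed names, not part of this header): only bus
 * or IOMMU code, never a regular driver, selects the dma_map_ops for a
 * device.  A hypothetical bus with its own implementation might do:
 *
 *	static const struct dma_map_ops my_bus_dma_ops = {
 *		.map_page	= my_bus_map_page,
 *		.unmap_page	= my_bus_unmap_page,
 *		.map_sg		= my_bus_map_sg,
 *		.unmap_sg	= my_bus_unmap_sg,
 *	};
 *
 *	set_dma_ops(dev, &my_bus_dma_ops);
 *
 * With no ops set, get_dma_ops() falls back to the architecture default, and
 * a NULL result lets the core bypass the indirect calls (see dma_is_direct()
 * below).
 */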
static inline bool dma_is_direct(const struct dma_map_ops *ops)
{
	return likely(!ops);
}

/*
 * All the dma_direct_* declarations are here just for the indirect call bypass,
 * and must not be used directly by drivers!
 */
dma_addr_t dma_direct_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs);
int dma_direct_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
		enum dma_data_direction dir, unsigned long attrs);

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_sync_single_for_device(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
    defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL) || \
    defined(CONFIG_SWIOTLB)
void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_unmap_sg(struct device *dev, struct scatterlist *sgl,
		int nents, enum dma_data_direction dir, unsigned long attrs);
void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir);
void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir);
#else
static inline void dma_direct_unmap_page(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
}
static inline void dma_direct_unmap_sg(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
}
static inline void dma_direct_sync_single_for_cpu(struct device *dev,
		dma_addr_t addr, size_t size, enum dma_data_direction dir)
{
}
static inline void dma_direct_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents, enum dma_data_direction dir)
{
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_map_single(dev, ptr, size);
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, virt_to_page(ptr),
				offset_in_page(ptr), size, dir, attrs);
	else
		addr = ops->map_page(dev, virt_to_page(ptr),
				offset_in_page(ptr), size, dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_unmap_page(dev, addr, size, dir, attrs);
	else if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

static inline void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	return dma_unmap_single_attrs(dev, addr, size, dir, attrs);
}

/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_is_direct(ops))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		addr = dma_direct_map_page(dev, page, offset, size, dir, attrs);
	else
		addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops && ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops && ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

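/*
 * Illustrative sketch only (assumed names, not part of this header): the
 * resource mapping helpers above are for physical addresses that are *not*
 * RAM, e.g. a device FIFO register used as the source of a device-to-memory
 * slave DMA transfer:
 *
 *	dma_addr_t fifo_dma;
 *
 *	fifo_dma = dma_map_resource(dma_dev, fifo_phys, sizeof(u32),
 *				    DMA_FROM_DEVICE, 0);
 *	if (dma_mapping_error(dma_dev, fifo_dma))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_resource(dma_dev, fifo_dma, sizeof(u32),
 *			   DMA_FROM_DEVICE, 0);
 *
 * where "fifo_phys" is the physical address of the FIFO and "dma_dev" is the
 * DMA engine device that will access it.
 */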
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_cpu(dev, addr + offset, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t addr, unsigned long offset, size_t size,
		enum dma_data_direction dir)
{
	return dma_sync_single_for_device(dev, addr + offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_is_direct(ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

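/*
 * Typical streaming DMA usage of the helpers above (illustrative only; the
 * buffer, length and device-start call are assumed driver-local names).  Map,
 * check for errors, let the hardware do the transfer, optionally hand the
 * buffer back and forth with the sync helpers, then unmap:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *
 *	start_hardware_tx(dev, handle, len);	// hypothetical device call
 *	...
 *	// to touch the buffer from the CPU before the device reuses it:
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_TO_DEVICE);
 *	// ... CPU reads/updates buf ...
 *	dma_sync_single_for_device(dev, handle, len, DMA_TO_DEVICE);
 *
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */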
void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction dir);

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);

void *dma_common_contiguous_remap(struct page *page, size_t size,
			unsigned long vm_flags,
			pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			unsigned long vm_flags, pgprot_t prot,
			const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

int __init dma_atomic_pool_init(gfp_t gfp, pgprot_t prot);
bool dma_in_atomic_pool(void *start, size_t size);
void *dma_alloc_from_pool(size_t size, struct page **ret_page, gfp_t flags);
bool dma_free_from_pool(void *start, size_t size);

int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		       dma_addr_t dma_addr, size_t size, unsigned long attrs);

int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs);
#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs);
void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs);

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_handle, gfp,
			(gfp & __GFP_NOWARN) ? DMA_ATTR_NO_WARN : 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
		void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

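/*
 * Illustrative only: coherent allocations return both a CPU virtual address
 * and a device-visible handle, and need no explicit syncing.  A hypothetical
 * driver allocating a descriptor ring might do:
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 *
 * where RING_BYTES is an assumed driver-specific size.
 */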
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	debug_dma_mapping_error(dev, dma_addr);

	if (dma_addr == DMA_MAPPING_ERROR)
		return -ENOMEM;
	return 0;
}

int dma_supported(struct device *dev, u64 mask);
int dma_set_mask(struct device *dev, u64 mask);
int dma_set_coherent_mask(struct device *dev, u64 mask);

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);

	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

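/*
 * Illustrative probe-time mask negotiation (not part of this header): try the
 * widest mask the hardware supports and fall back to 32 bits if the platform
 * rejects it:
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32))) {
 *		dev_err(dev, "no usable DMA addressing\n");
 *		return -ENODEV;
 *	}
 */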
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline int dma_set_max_seg_size(struct device *dev, unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

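/*
 * Illustrative only: the segment size and boundary setters above rely on the
 * bus code having allocated dev->dma_parms; they return -EIO otherwise.  A
 * hypothetical host controller limited to 64 KiB segments that must not cross
 * a 4 GiB boundary might be described as:
 *
 *	if (dma_set_max_seg_size(dev, SZ_64K) ||
 *	    dma_set_seg_boundary(dev, DMA_BIT_MASK(32)))
 *		dev_warn(dev, "failed to set DMA segment limits\n");
 */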
#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return (*dev->dma_mask >> PAGE_SHIFT) + dev->dma_pfn_offset;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

/*
 * Managed DMA API
 */
#ifdef CONFIG_HAS_DMA
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
#else /* !CONFIG_HAS_DMA */
static inline void *dmam_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t gfp)
{ return NULL; }
static inline void dmam_free_coherent(struct device *dev, size_t size,
				      void *vaddr, dma_addr_t dma_handle) { }
#endif /* !CONFIG_HAS_DMA */

extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					 phys_addr_t phys_addr,
					 dma_addr_t device_addr, size_t size,
					 int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

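/*
 * Illustrative only: the managed (devres) variants free the allocation
 * automatically when the device is unbound, so a probe routine needs no
 * matching free on its error and remove paths:
 *
 *	priv->desc = dmam_alloc_coherent(dev, DESC_BYTES,
 *					 &priv->desc_dma, GFP_KERNEL);
 *	if (!priv->desc)
 *		return -ENOMEM;
 *
 * where "priv" and DESC_BYTES are assumed driver-specific names.
 */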
static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	unsigned long attrs = DMA_ATTR_WRITE_COMBINE;

	if (gfp & __GFP_NOWARN)
		attrs |= DMA_ATTR_NO_WARN;

	return dma_alloc_attrs(dev, size, dma_addr, gfp, attrs);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

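/*
 * Illustrative only: the *_wc helpers are shorthand for the corresponding
 * *_attrs calls with DMA_ATTR_WRITE_COMBINE, typically used for frame-buffer
 * style memory that the CPU mostly writes sequentially:
 *
 *	fb_virt = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb_virt)
 *		return -ENOMEM;
 *	...
 *	dma_free_wc(dev, fb_size, fb_virt, fb_dma);
 *
 * "fb_virt", "fb_dma" and "fb_size" are assumed names for the example.
 */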
#ifdef CONFIG_NEED_DMA_MAP_STATE
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)        dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)          __u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)          ((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) (((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)            ((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   (((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)          (0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL) do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)            (0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)   do { } while (0)
#endif
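/*
 * Illustrative only: drivers use the macros above so that the address and
 * length needed for a later unmap occupy space in their state structures only
 * on configurations that actually need them (CONFIG_NEED_DMA_MAP_STATE):
 *
 *	struct my_ring_entry {		// hypothetical driver structure
 *		struct sk_buff *skb;
 *		DEFINE_DMA_UNMAP_ADDR(mapping);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_unmap_addr_set(entry, mapping, handle);
 *	dma_unmap_len_set(entry, len, size);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(entry, mapping),
 *			 dma_unmap_len(entry, len), DMA_TO_DEVICE);
 */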

#endif