#ifndef _LINUX_DMA_MAPPING_H
#define _LINUX_DMA_MAPPING_H

#include <linux/sizes.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/err.h>
#include <linux/dma-debug.h>
#include <linux/dma-direction.h>
#include <linux/scatterlist.h>
#include <linux/kmemcheck.h>
#include <linux/bug.h>

/**
 * List of possible attributes associated with a DMA mapping. The semantics
 * of each attribute should be defined in Documentation/DMA-attributes.txt.
 *
 * DMA_ATTR_WRITE_BARRIER: DMA to a memory region with this attribute
 * forces all pending DMA writes to complete.
 */
#define DMA_ATTR_WRITE_BARRIER		(1UL << 0)
/*
 * DMA_ATTR_WEAK_ORDERING: Specifies that reads and writes to the mapping
 * may be weakly ordered, that is that reads and writes may pass each other.
 */
#define DMA_ATTR_WEAK_ORDERING		(1UL << 1)
/*
 * DMA_ATTR_WRITE_COMBINE: Specifies that writes to the mapping may be
 * buffered to improve performance.
 */
#define DMA_ATTR_WRITE_COMBINE		(1UL << 2)
/*
 * DMA_ATTR_NON_CONSISTENT: Lets the platform choose to return either
 * consistent or non-consistent memory as it sees fit.
 */
#define DMA_ATTR_NON_CONSISTENT		(1UL << 3)
/*
 * DMA_ATTR_NO_KERNEL_MAPPING: Lets the platform avoid creating a kernel
 * virtual mapping for the allocated buffer.
 */
#define DMA_ATTR_NO_KERNEL_MAPPING	(1UL << 4)
/*
 * DMA_ATTR_SKIP_CPU_SYNC: Allows platform code to skip synchronization of
 * the CPU cache for the given buffer, assuming that it has already been
 * transferred to the 'device' domain.
 */
#define DMA_ATTR_SKIP_CPU_SYNC		(1UL << 5)
/*
 * DMA_ATTR_FORCE_CONTIGUOUS: Forces contiguous allocation of the buffer
 * in physical memory.
 */
#define DMA_ATTR_FORCE_CONTIGUOUS	(1UL << 6)
/*
 * DMA_ATTR_ALLOC_SINGLE_PAGES: This is a hint to the DMA-mapping subsystem
 * that it's probably not worth the time to try to allocate memory in a way
 * that gives better TLB efficiency.
 */
#define DMA_ATTR_ALLOC_SINGLE_PAGES	(1UL << 7)
/*
 * DMA_ATTR_NO_WARN: This tells the DMA-mapping subsystem to suppress
 * allocation failure reports (similarly to __GFP_NOWARN).
 */
#define DMA_ATTR_NO_WARN	(1UL << 8)

/*
 * DMA_ATTR_PRIVILEGED: used to indicate that the buffer is fully
 * accessible at an elevated privilege level (and ideally inaccessible or
 * at least read-only at lesser-privileged levels).
 */
#define DMA_ATTR_PRIVILEGED		(1UL << 9)

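/*
 * Illustrative sketch (not part of this header): the attributes above are
 * bits in a mask, so they can be ORed together and handed to the *_attrs
 * variants of the allocation and mapping helpers declared below.  "dev"
 * and "size" are hypothetical driver-owned values.
 *
 *	unsigned long attrs = DMA_ATTR_WRITE_COMBINE | DMA_ATTR_NO_WARN;
 *	dma_addr_t handle;
 *	void *vaddr;
 *
 *	vaddr = dma_alloc_attrs(dev, size, &handle, GFP_KERNEL, attrs);
 *	if (!vaddr)
 *		return -ENOMEM;
 */
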
/*
 * A dma_addr_t can hold any valid DMA or bus address for the platform.
 * It can be given to a device to use as a DMA source or target.  A CPU cannot
 * reference a dma_addr_t directly because there may be translation between
 * its physical address space and the bus address space.
 */
struct dma_map_ops {
	void *(*alloc)(struct device *dev, size_t size,
		       dma_addr_t *dma_handle, gfp_t gfp,
		       unsigned long attrs);
	void (*free)(struct device *dev, size_t size,
		     void *vaddr, dma_addr_t dma_handle,
		     unsigned long attrs);
	int (*mmap)(struct device *, struct vm_area_struct *,
		    void *, dma_addr_t, size_t,
		    unsigned long attrs);

	int (*get_sgtable)(struct device *dev, struct sg_table *sgt, void *,
			   dma_addr_t, size_t, unsigned long attrs);

	dma_addr_t (*map_page)(struct device *dev, struct page *page,
			       unsigned long offset, size_t size,
			       enum dma_data_direction dir,
			       unsigned long attrs);
	void (*unmap_page)(struct device *dev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir,
			   unsigned long attrs);
	/*
	 * map_sg returns 0 on error and a value > 0 on success.
	 * It should never return a value < 0.
	 */
	int (*map_sg)(struct device *dev, struct scatterlist *sg,
		      int nents, enum dma_data_direction dir,
		      unsigned long attrs);
	void (*unmap_sg)(struct device *dev,
			 struct scatterlist *sg, int nents,
			 enum dma_data_direction dir,
			 unsigned long attrs);
	dma_addr_t (*map_resource)(struct device *dev, phys_addr_t phys_addr,
				   size_t size, enum dma_data_direction dir,
				   unsigned long attrs);
	void (*unmap_resource)(struct device *dev, dma_addr_t dma_handle,
			       size_t size, enum dma_data_direction dir,
			       unsigned long attrs);
	void (*sync_single_for_cpu)(struct device *dev,
				    dma_addr_t dma_handle, size_t size,
				    enum dma_data_direction dir);
	void (*sync_single_for_device)(struct device *dev,
				       dma_addr_t dma_handle, size_t size,
				       enum dma_data_direction dir);
	void (*sync_sg_for_cpu)(struct device *dev,
				struct scatterlist *sg, int nents,
				enum dma_data_direction dir);
	void (*sync_sg_for_device)(struct device *dev,
				   struct scatterlist *sg, int nents,
				   enum dma_data_direction dir);
	int (*mapping_error)(struct device *dev, dma_addr_t dma_addr);
	int (*dma_supported)(struct device *dev, u64 mask);
#ifdef ARCH_HAS_DMA_GET_REQUIRED_MASK
	u64 (*get_required_mask)(struct device *dev);
#endif
	int is_phys;
};

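/*
 * Illustrative sketch (not from this file): an architecture or IOMMU
 * backend implements the DMA API by filling in a dma_map_ops instance and
 * attaching it to a device with set_dma_ops().  All "my_*" names below are
 * hypothetical.
 *
 *	static const struct dma_map_ops my_dma_ops = {
 *		.alloc		= my_alloc,
 *		.free		= my_free,
 *		.map_page	= my_map_page,
 *		.unmap_page	= my_unmap_page,
 *		.map_sg		= my_map_sg,
 *		.unmap_sg	= my_unmap_sg,
 *		.dma_supported	= my_dma_supported,
 *	};
 *
 *	set_dma_ops(dev, &my_dma_ops);
 */
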
extern const struct dma_map_ops dma_noop_ops;
extern const struct dma_map_ops dma_virt_ops;

#define DMA_BIT_MASK(n)	(((n) == 64) ? ~0ULL : ((1ULL << (n)) - 1))

#define DMA_MASK_NONE	0x0ULL

static inline int valid_dma_direction(int dma_direction)
{
	return ((dma_direction == DMA_BIDIRECTIONAL) ||
		(dma_direction == DMA_TO_DEVICE) ||
		(dma_direction == DMA_FROM_DEVICE));
}

static inline int is_device_dma_capable(struct device *dev)
{
	return dev->dma_mask != NULL && *dev->dma_mask != DMA_MASK_NONE;
}

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
/*
 * These three functions are only for the DMA allocator.
 * Don't use them in device drivers.
 */
int dma_alloc_from_dev_coherent(struct device *dev, ssize_t size,
				dma_addr_t *dma_handle, void **ret);
int dma_release_from_dev_coherent(struct device *dev, int order, void *vaddr);

int dma_mmap_from_dev_coherent(struct device *dev, struct vm_area_struct *vma,
			       void *cpu_addr, size_t size, int *ret);

void *dma_alloc_from_global_coherent(ssize_t size, dma_addr_t *dma_handle);
int dma_release_from_global_coherent(int order, void *vaddr);
int dma_mmap_from_global_coherent(struct vm_area_struct *vma, void *cpu_addr,
				  size_t size, int *ret);

#else
#define dma_alloc_from_dev_coherent(dev, size, handle, ret) (0)
#define dma_release_from_dev_coherent(dev, order, vaddr) (0)
#define dma_mmap_from_dev_coherent(dev, vma, vaddr, order, ret) (0)

static inline void *dma_alloc_from_global_coherent(ssize_t size,
						   dma_addr_t *dma_handle)
{
	return NULL;
}

static inline int dma_release_from_global_coherent(int order, void *vaddr)
{
	return 0;
}

static inline int dma_mmap_from_global_coherent(struct vm_area_struct *vma,
						void *cpu_addr, size_t size,
						int *ret)
{
	return 0;
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
#include <asm/dma-mapping.h>
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	if (dev && dev->dma_ops)
		return dev->dma_ops;
	return get_arch_dma_ops(dev ? dev->bus : NULL);
}

static inline void set_dma_ops(struct device *dev,
			       const struct dma_map_ops *dma_ops)
{
	dev->dma_ops = dma_ops;
}
#else
/*
 * Define the DMA API to allow compilation but not linking of DMA-dependent
 * code.  Code that depends on the DMA-mapping API needs to set
 * 'depends on HAS_DMA' in its Kconfig.
 */
extern const struct dma_map_ops bad_dma_ops;
static inline const struct dma_map_ops *get_dma_ops(struct device *dev)
{
	return &bad_dma_ops;
}
#endif

static inline dma_addr_t dma_map_single_attrs(struct device *dev, void *ptr,
					      size_t size,
					      enum dma_data_direction dir,
					      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(ptr, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, virt_to_page(ptr),
			     offset_in_page(ptr), size,
			     dir, attrs);
	debug_dma_map_page(dev, virt_to_page(ptr),
			   offset_in_page(ptr), size,
			   dir, addr, true);
	return addr;
}

static inline void dma_unmap_single_attrs(struct device *dev, dma_addr_t addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}

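/*
 * Illustrative sketch: a typical streaming mapping of a driver-owned buffer
 * for a transfer to the device, using the non-attrs wrappers defined
 * further down.  "dev", "buf" and "len" are hypothetical.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	... start the transfer and wait for it to complete ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
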
/*
 * dma_map_sg_attrs returns 0 on error and > 0 on success.
 * It should never return a value < 0.
 */
static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction dir,
				   unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int i, ents;
	struct scatterlist *s;

	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	BUG_ON(!valid_dma_direction(dir));
	ents = ops->map_sg(dev, sg, nents, dir, attrs);
	BUG_ON(ents < 0);
	debug_dma_map_sg(dev, sg, nents, ents, dir);

	return ents;
}

static inline void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
				      int nents, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}

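/*
 * Illustrative sketch: mapping a scatterlist.  The mapping may coalesce
 * entries, so the device must be programmed with the returned count rather
 * than the original nents, while unmapping uses the original nents.  "dev",
 * "sgt" (a driver-owned struct sg_table) and program_hw_entry() are
 * hypothetical.
 *
 *	struct scatterlist *sg;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -EIO;
 *	for_each_sg(sgt->sgl, sg, count, i)
 *		program_hw_entry(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sgt->sgl, sgt->nents, DMA_FROM_DEVICE);
 */
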
static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    size_t offset, size_t size,
					    enum dma_data_direction dir,
					    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	kmemcheck_mark_initialized(page_address(page) + offset, size);
	BUG_ON(!valid_dma_direction(dir));
	addr = ops->map_page(dev, page, offset, size, dir, attrs);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t addr, size_t size,
					enum dma_data_direction dir,
					unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, attrs);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}

static inline dma_addr_t dma_map_resource(struct device *dev,
					  phys_addr_t phys_addr,
					  size_t size,
					  enum dma_data_direction dir,
					  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));

	/* Don't allow RAM to be mapped */
	BUG_ON(pfn_valid(PHYS_PFN(phys_addr)));

	addr = phys_addr;
	if (ops->map_resource)
		addr = ops->map_resource(dev, phys_addr, size, dir, attrs);

	debug_dma_map_resource(dev, phys_addr, size, dir, addr);

	return addr;
}

static inline void dma_unmap_resource(struct device *dev, dma_addr_t addr,
				      size_t size, enum dma_data_direction dir,
				      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_resource)
		ops->unmap_resource(dev, addr, size, dir, attrs);
	debug_dma_unmap_resource(dev, addr, size, dir);
}

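/*
 * Illustrative sketch: dma_map_resource() is meant for MMIO resources such
 * as a peripheral FIFO used in device-to-device transfers, never for RAM
 * (note the pfn_valid() check above).  "dev" and "fifo_phys" are
 * hypothetical.
 *
 *	dma_addr_t dma;
 *
 *	dma = dma_map_resource(dev, fifo_phys, 4, DMA_TO_DEVICE, 0);
 *	if (dma_mapping_error(dev, dma))
 *		return -EIO;
 *	...
 *	dma_unmap_resource(dev, dma, 4, DMA_TO_DEVICE, 0);
 */
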
static inline void dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr,
					   size_t size,
					   enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}

static inline void dma_sync_single_for_device(struct device *dev,
					      dma_addr_t addr, size_t size,
					      enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
						 dma_addr_t addr,
						 unsigned long offset,
						 size_t size,
						 enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_cpu(dev, addr, offset, size, dir);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
						    dma_addr_t addr,
						    unsigned long offset,
						    size_t size,
						    enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr + offset, size, dir);
	debug_dma_sync_single_range_for_device(dev, addr, offset, size, dir);
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}

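/*
 * Illustrative sketch: a streaming buffer that stays mapped across several
 * transfers must be handed back to the CPU before the CPU reads or writes
 * it, and back to the device afterwards.  "dev", "handle", "buf", "len"
 * and process_data() are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	process_data(buf, len);
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
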
#define dma_map_single(d, a, s, r) dma_map_single_attrs(d, a, s, r, 0)
#define dma_unmap_single(d, a, s, r) dma_unmap_single_attrs(d, a, s, r, 0)
#define dma_map_sg(d, s, n, r) dma_map_sg_attrs(d, s, n, r, 0)
#define dma_unmap_sg(d, s, n, r) dma_unmap_sg_attrs(d, s, n, r, 0)
#define dma_map_page(d, p, o, s, r) dma_map_page_attrs(d, p, o, s, r, 0)
#define dma_unmap_page(d, a, s, r) dma_unmap_page_attrs(d, a, s, r, 0)

extern int dma_common_mmap(struct device *dev, struct vm_area_struct *vma,
			   void *cpu_addr, dma_addr_t dma_addr, size_t size);

void *dma_common_contiguous_remap(struct page *page, size_t size,
				  unsigned long vm_flags,
				  pgprot_t prot, const void *caller);

void *dma_common_pages_remap(struct page **pages, size_t size,
			     unsigned long vm_flags, pgprot_t prot,
			     const void *caller);
void dma_common_free_remap(void *cpu_addr, size_t size, unsigned long vm_flags);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @handle: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs
 * into user space.  The coherent DMA buffer must not be freed by the
 * driver until the user space mapping has been released.
 */
static inline int
dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma, void *cpu_addr,
	       dma_addr_t dma_addr, size_t size, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->mmap)
		return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
	return dma_common_mmap(dev, vma, cpu_addr, dma_addr, size);
}

#define dma_mmap_coherent(d, v, c, h, s) dma_mmap_attrs(d, v, c, h, s, 0)

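/*
 * Illustrative sketch: exporting a coherent buffer to user space from a
 * driver's mmap() file operation.  "my_dev", "vaddr", "handle" and "size"
 * are hypothetical values saved from an earlier dma_alloc_coherent() call.
 *
 *	static int my_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		return dma_mmap_coherent(my_dev, vma, vaddr, handle, size);
 *	}
 */
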
int
dma_common_get_sgtable(struct device *dev, struct sg_table *sgt,
		       void *cpu_addr, dma_addr_t dma_addr, size_t size);

static inline int
dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt, void *cpu_addr,
		      dma_addr_t dma_addr, size_t size,
		      unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	BUG_ON(!ops);
	if (ops->get_sgtable)
		return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size,
					attrs);
	return dma_common_get_sgtable(dev, sgt, cpu_addr, dma_addr, size);
}

#define dma_get_sgtable(d, t, v, h, s) dma_get_sgtable_attrs(d, t, v, h, s, 0)

#ifndef arch_dma_alloc_attrs
#define arch_dma_alloc_attrs(dev, flag)	(true)
#endif

static inline void *dma_alloc_attrs(struct device *dev, size_t size,
				    dma_addr_t *dma_handle, gfp_t flag,
				    unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	BUG_ON(!ops);

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr))
		return cpu_addr;

	if (!arch_dma_alloc_attrs(&dev, &flag))
		return NULL;
	if (!ops->alloc)
		return NULL;

	cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr);
	return cpu_addr;
}

static inline void dma_free_attrs(struct device *dev, size_t size,
				  void *cpu_addr, dma_addr_t dma_handle,
				  unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!ops);
	WARN_ON(irqs_disabled());

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;

	if (!ops->free || !cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	ops->free(dev, size, cpu_addr, dma_handle, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	return dma_alloc_attrs(dev, size, dma_handle, flag, 0);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_handle, 0);
}

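/*
 * Illustrative sketch: allocating and freeing a coherent descriptor ring.
 * "dev" and "ring_size" are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, access the ring via "ring" ...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 */
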
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	debug_dma_mapping_error(dev, dma_addr);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);
	return 0;
}

static inline int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (!ops)
		return 0;
	if (!ops->dma_supported)
		return 1;
	return ops->dma_supported(dev, mask);
}

#ifndef HAVE_ARCH_DMA_SET_MASK
static inline int dma_set_mask(struct device *dev, u64 mask)
{
	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;
	*dev->dma_mask = mask;
	return 0;
}
#endif

static inline u64 dma_get_mask(struct device *dev)
{
	if (dev && dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	return DMA_BIT_MASK(32);
}

#ifdef CONFIG_ARCH_HAS_DMA_SET_COHERENT_MASK
int dma_set_coherent_mask(struct device *dev, u64 mask);
#else
static inline int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	if (!dma_supported(dev, mask))
		return -EIO;
	dev->coherent_dma_mask = mask;
	return 0;
}
#endif

/*
 * Set both the DMA mask and the coherent DMA mask to the same thing.
 * Note that we don't check the return value from dma_set_coherent_mask()
 * as the DMA API guarantees that the coherent DMA mask can be set to
 * the same or smaller than the streaming DMA mask.
 */
static inline int dma_set_mask_and_coherent(struct device *dev, u64 mask)
{
	int rc = dma_set_mask(dev, mask);
	if (rc == 0)
		dma_set_coherent_mask(dev, mask);
	return rc;
}

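/*
 * Illustrative sketch: a typical probe() sets both masks once, falling
 * back to 32 bits if the wider mask is not supported.  "dev" is
 * hypothetical.
 *
 *	if (dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)) &&
 *	    dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)))
 *		return -EIO;
 */
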
/*
 * Similar to the above, except it deals with the case where the device
 * does not have dev->dma_mask appropriately set up.
 */
static inline int dma_coerce_mask_and_coherent(struct device *dev, u64 mask)
{
	dev->dma_mask = &dev->coherent_dma_mask;
	return dma_set_mask_and_coherent(dev, mask);
}

extern u64 dma_get_required_mask(struct device *dev);

#ifndef arch_setup_dma_ops
static inline void arch_setup_dma_ops(struct device *dev, u64 dma_base,
				      u64 size, const struct iommu_ops *iommu,
				      bool coherent) { }
#endif

#ifndef arch_teardown_dma_ops
static inline void arch_teardown_dma_ops(struct device *dev) { }
#endif

static inline unsigned int dma_get_max_seg_size(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->max_segment_size)
		return dev->dma_parms->max_segment_size;
	return SZ_64K;
}

static inline unsigned int dma_set_max_seg_size(struct device *dev,
						unsigned int size)
{
	if (dev->dma_parms) {
		dev->dma_parms->max_segment_size = size;
		return 0;
	}
	return -EIO;
}

static inline unsigned long dma_get_seg_boundary(struct device *dev)
{
	if (dev->dma_parms && dev->dma_parms->segment_boundary_mask)
		return dev->dma_parms->segment_boundary_mask;
	return DMA_BIT_MASK(32);
}

static inline int dma_set_seg_boundary(struct device *dev, unsigned long mask)
{
	if (dev->dma_parms) {
		dev->dma_parms->segment_boundary_mask = mask;
		return 0;
	}
	return -EIO;
}

#ifndef dma_max_pfn
static inline unsigned long dma_max_pfn(struct device *dev)
{
	return *dev->dma_mask >> PAGE_SHIFT;
}
#endif

static inline void *dma_zalloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	void *ret = dma_alloc_coherent(dev, size, dma_handle,
				       flag | __GFP_ZERO);
	return ret;
}

#ifdef CONFIG_HAS_DMA
static inline int dma_get_cache_alignment(void)
{
#ifdef ARCH_DMA_MINALIGN
	return ARCH_DMA_MINALIGN;
#endif
	return 1;
}
#endif

/* flags for the coherent memory api */
#define DMA_MEMORY_EXCLUSIVE		0x01

#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
int dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
				dma_addr_t device_addr, size_t size, int flags);
void dma_release_declared_memory(struct device *dev);
void *dma_mark_declared_memory_occupied(struct device *dev,
					dma_addr_t device_addr, size_t size);
#else
static inline int
dma_declare_coherent_memory(struct device *dev, phys_addr_t phys_addr,
			    dma_addr_t device_addr, size_t size, int flags)
{
	return -ENOSYS;
}

static inline void
dma_release_declared_memory(struct device *dev)
{
}

static inline void *
dma_mark_declared_memory_occupied(struct device *dev,
				  dma_addr_t device_addr, size_t size)
{
	return ERR_PTR(-EBUSY);
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

#ifdef CONFIG_HAS_DMA
int dma_configure(struct device *dev);
void dma_deconfigure(struct device *dev);
#else
static inline int dma_configure(struct device *dev)
{
	return 0;
}

static inline void dma_deconfigure(struct device *dev) {}
#endif

/*
 * Managed DMA API
 */
extern void *dmam_alloc_coherent(struct device *dev, size_t size,
				 dma_addr_t *dma_handle, gfp_t gfp);
extern void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			       dma_addr_t dma_handle);
extern void *dmam_alloc_attrs(struct device *dev, size_t size,
			      dma_addr_t *dma_handle, gfp_t gfp,
			      unsigned long attrs);
#ifdef CONFIG_HAVE_GENERIC_DMA_COHERENT
extern int dmam_declare_coherent_memory(struct device *dev,
					phys_addr_t phys_addr,
					dma_addr_t device_addr, size_t size,
					int flags);
extern void dmam_release_declared_memory(struct device *dev);
#else /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
static inline int dmam_declare_coherent_memory(struct device *dev,
				phys_addr_t phys_addr, dma_addr_t device_addr,
				size_t size, int flags)
{
	return 0;
}

static inline void dmam_release_declared_memory(struct device *dev)
{
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */

static inline void *dma_alloc_wc(struct device *dev, size_t size,
				 dma_addr_t *dma_addr, gfp_t gfp)
{
	return dma_alloc_attrs(dev, size, dma_addr, gfp,
			       DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_alloc_writecombine
#define dma_alloc_writecombine dma_alloc_wc
#endif

static inline void dma_free_wc(struct device *dev, size_t size,
			       void *cpu_addr, dma_addr_t dma_addr)
{
	return dma_free_attrs(dev, size, cpu_addr, dma_addr,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_free_writecombine
#define dma_free_writecombine dma_free_wc
#endif

static inline int dma_mmap_wc(struct device *dev,
			      struct vm_area_struct *vma,
			      void *cpu_addr, dma_addr_t dma_addr,
			      size_t size)
{
	return dma_mmap_attrs(dev, vma, cpu_addr, dma_addr, size,
			      DMA_ATTR_WRITE_COMBINE);
}
#ifndef dma_mmap_writecombine
#define dma_mmap_writecombine dma_mmap_wc
#endif

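/*
 * Illustrative sketch: a frame buffer allocated write-combined and later
 * handed to user space.  "dev", "vma" and "fb_size" are hypothetical.
 *
 *	dma_addr_t fb_dma;
 *	void *fb;
 *	int ret;
 *
 *	fb = dma_alloc_wc(dev, fb_size, &fb_dma, GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...
 *	ret = dma_mmap_wc(dev, vma, fb, fb_dma, fb_size);
 */
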
#if defined(CONFIG_NEED_DMA_MAP_STATE) || defined(CONFIG_DMA_API_DEBUG)
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)	dma_addr_t ADDR_NAME
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)		__u32 LEN_NAME
#define dma_unmap_addr(PTR, ADDR_NAME)		((PTR)->ADDR_NAME)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	(((PTR)->ADDR_NAME) = (VAL))
#define dma_unmap_len(PTR, LEN_NAME)		((PTR)->LEN_NAME)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	(((PTR)->LEN_NAME) = (VAL))
#else
#define DEFINE_DMA_UNMAP_ADDR(ADDR_NAME)
#define DEFINE_DMA_UNMAP_LEN(LEN_NAME)
#define dma_unmap_addr(PTR, ADDR_NAME)		(0)
#define dma_unmap_addr_set(PTR, ADDR_NAME, VAL)	do { } while (0)
#define dma_unmap_len(PTR, LEN_NAME)		(0)
#define dma_unmap_len_set(PTR, LEN_NAME, VAL)	do { } while (0)
#endif

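/*
 * Illustrative sketch: the dma_unmap_* state macros above let a driver
 * carry unmap information only on configurations that need it.  "my_desc",
 * "desc", "dev", "buf" and "len" are hypothetical.
 *
 *	struct my_desc {
 *		DEFINE_DMA_UNMAP_ADDR(addr);
 *		DEFINE_DMA_UNMAP_LEN(len);
 *	};
 *
 *	dma_addr_t mapping = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	dma_unmap_addr_set(desc, addr, mapping);
 *	dma_unmap_len_set(desc, len, len);
 *	...
 *	dma_unmap_single(dev, dma_unmap_addr(desc, addr),
 *			 dma_unmap_len(desc, len), DMA_TO_DEVICE);
 */
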
#endif