#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>

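/*
 * Global DMA policy state shared by the x86 DMA backends, largely set up
 * from boot parameters and arch init code: bad_dma_address is the sentinel
 * returned on mapping failure, fallback_dev is used when a driver passes a
 * NULL device, panic_on_overflow panics instead of failing when IOMMU
 * mapping space runs out, forbid_dac disables 64-bit (DAC) PCI addressing,
 * and force_iommu routes all DMA through the IOMMU.
 */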
extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device fallback_dev;
extern int panic_on_overflow;
extern int forbid_dac;
extern int force_iommu;

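/*
 * Table of operations provided by the active DMA backend (nommu, swiotlb,
 * GART, Calgary, ...).  The inline wrappers below dispatch through this
 * table; entries left NULL are treated as no-ops where the wrappers check
 * for them.
 */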
struct dma_mapping_ops {
        int (*mapping_error)(dma_addr_t dma_addr);
        void *(*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void (*free_coherent)(struct device *dev, size_t size,
                              void *vaddr, dma_addr_t dma_handle);
        dma_addr_t (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                 size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t (*map_simple)(struct device *hwdev, phys_addr_t ptr,
                                 size_t size, int direction);
        void (*unmap_single)(struct device *dev, dma_addr_t addr,
                             size_t size, int direction);
        void (*sync_single_for_cpu)(struct device *hwdev,
                                    dma_addr_t dma_handle, size_t size,
                                    int direction);
        void (*sync_single_for_device)(struct device *hwdev,
                                       dma_addr_t dma_handle, size_t size,
                                       int direction);
        void (*sync_single_range_for_cpu)(struct device *hwdev,
                                          dma_addr_t dma_handle,
                                          unsigned long offset,
                                          size_t size, int direction);
        void (*sync_single_range_for_device)(struct device *hwdev,
                                             dma_addr_t dma_handle,
                                             unsigned long offset,
                                             size_t size, int direction);
        void (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void (*sync_sg_for_device)(struct device *hwdev,
                                   struct scatterlist *sg, int nelems,
                                   int direction);
        int (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                      int nents, int direction);
        void (*unmap_sg)(struct device *hwdev,
                         struct scatterlist *sg, int nents,
                         int direction);
        int (*dma_supported)(struct device *hwdev, u64 mask);
        int is_phys;
};

extern const struct dma_mapping_ops *dma_ops;

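/*
 * Check whether a dma_map_*() call failed.  Backends that can report
 * failures provide ->mapping_error(); otherwise the result is compared
 * against the bad_dma_address sentinel.
 */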
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
        if (dma_ops->mapping_error)
                return dma_ops->mapping_error(dma_addr);

        return (dma_addr == bad_dma_address);
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)

void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *dma_handle, gfp_t flag);

void dma_free_coherent(struct device *dev, size_t size,
                       void *vaddr, dma_addr_t dma_handle);

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

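/*
 * Map a single virtually contiguous buffer for streaming DMA and return
 * the bus address to hand to the device.  An illustrative sketch of driver
 * usage (device pointer, buffer and length are hypothetical; the
 * DMA_TO_DEVICE constant comes from <linux/dma-mapping.h>):
 *
 *      dma_addr_t bus = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(bus))
 *              return -ENOMEM;
 *      ... let the device DMA from 'bus' ...
 *      dma_unmap_single(&pdev->dev, bus, len, DMA_TO_DEVICE);
 */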
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->unmap_single)
                dma_ops->unmap_single(dev, addr, size, direction);
}

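/*
 * Map a scatterlist of nents entries for streaming DMA.  Returns the
 * number of DMA segments actually used (backends may merge adjacent
 * entries when iommu_merge allows it), or 0 on failure.
 */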
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->unmap_sg)
                dma_ops->unmap_sg(hwdev, sg, nents, direction);
}

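/*
 * The dma_sync_* helpers hand ownership of a streaming mapping back to
 * the CPU (*_for_cpu) or to the device (*_for_device) without unmapping
 * it, so the same buffer can be reused across transfers.
 */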
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_cpu)
                dma_ops->sync_single_for_cpu(hwdev, dma_handle, size,
                                             direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_for_device)
                dma_ops->sync_single_for_device(hwdev, dma_handle, size,
                                                direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size,
                              int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_cpu)
                dma_ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                                   size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_single_range_for_device)
                dma_ops->sync_single_range_for_device(hwdev, dma_handle,
                                                      offset, size, direction);

        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_cpu)
                dma_ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        if (dma_ops->sync_sg_for_device)
                dma_ops->sync_sg_for_device(hwdev, sg, nelems, direction);

        flush_write_buffers();
}

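/*
 * Page-based mappings are built on ->map_single() using the physical
 * address of the page plus the offset; dma_unmap_page() is therefore
 * just dma_unmap_single().
 */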
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        BUG_ON(!valid_dma_direction(direction));
        return dma_ops->map_single(dev, page_to_phys(page) + offset,
                                   size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

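/*
 * x86 DMA is cache-coherent, so syncing a buffer obtained through
 * dma_alloc_noncoherent() only needs the CPU write buffers flushed.
 */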
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /* no easy way to get cache size on all x86, so return the
         * maximum possible, to be safe */
        return boot_cpu_data.x86_clflush_size;
}

#define dma_is_consistent(d, h) (1)

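/*
 * On 32-bit x86 a driver may declare a region of bus address space
 * (such as device-local memory) as per-device coherent DMA memory, which
 * dma_alloc_coherent() will then allocate from for that device.
 */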
#ifdef CONFIG_X86_32
# define ARCH_HAS_DMA_DECLARE_COHERENT_MEMORY
struct dma_coherent_mem {
        void *virt_base;
        u32 device_base;
        int size;
        int flags;
        unsigned long *bitmap;
};

extern int
dma_declare_coherent_memory(struct device *dev, dma_addr_t bus_addr,
                            dma_addr_t device_addr, size_t size, int flags);

extern void
dma_release_declared_memory(struct device *dev);

extern void *
dma_mark_declared_memory_occupied(struct device *dev,
                                  dma_addr_t device_addr, size_t size);
#endif /* CONFIG_X86_32 */
#endif