#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

struct dma_mapping_ops {
	int		(*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void		*(*alloc_coherent)(struct device *dev, size_t size,
					   dma_addr_t *dma_handle, gfp_t gfp);
	void		(*free_coherent)(struct device *dev, size_t size,
					 void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *hwdev, phys_addr_t ptr,
				      size_t size, int direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t addr,
					size_t size, int direction);
	void		(*sync_single_for_cpu)(struct device *hwdev,
					       dma_addr_t dma_handle,
					       size_t size, int direction);
	void		(*sync_single_for_device)(struct device *hwdev,
						  dma_addr_t dma_handle,
						  size_t size, int direction);
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
						     dma_addr_t dma_handle,
						     unsigned long offset,
						     size_t size,
						     int direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
							dma_addr_t dma_handle,
							unsigned long offset,
							size_t size,
							int direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
					   struct scatterlist *sg, int nelems,
					   int direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
					      struct scatterlist *sg,
					      int nelems, int direction);
	int		(*map_sg)(struct device *hwdev, struct scatterlist *sg,
				  int nents, int direction);
	void		(*unmap_sg)(struct device *hwdev,
				    struct scatterlist *sg, int nents,
				    int direction);
	int		(*dma_supported)(struct device *hwdev, u64 mask);
	int		is_phys;
};
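
/*
 * Illustrative only: a backend (nommu, swiotlb, an IOMMU driver, ...)
 * fills in such a table and either assigns it to the global dma_ops
 * pointer or, on 64-bit, hangs it off dev->archdata.dma_ops.  The
 * example_* functions are hypothetical.
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.mapping_error	= example_mapping_error,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.is_phys	= 1,
 *	};
 */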

extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	/* 64-bit allows per-device ops; fall back to the global table */
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour: on 32-bit, mappings never fail */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
#endif
}
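
/*
 * Illustrative sketch, not part of this header's API: a typical caller
 * maps a buffer, checks the handle with dma_mapping_error() and unmaps
 * when done.  "dev", "buf" and "len" below are hypothetical driver
 * state.
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */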

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}
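
/*
 * Illustrative sketch (hypothetical driver code, program_hw() is made
 * up): dma_map_sg() may merge entries, so the hardware is programmed
 * with the returned count (0 means failure), while dma_unmap_sg() is
 * called with the original nents.
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		program_hw(dev, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */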

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get the cache size on all x86, so return
	 * the maximum possible to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = dev->coherent_dma_mask;

	/* No mask set: assume ISA for GFP_DMA callers, 32-bit otherwise */
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
#ifdef CONFIG_X86_64
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
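
/*
 * For example (illustrative only): on a 64-bit kernel, a device whose
 * coherent_dma_mask is DMA_32BIT_MASK turns a plain GFP_KERNEL request
 * into GFP_KERNEL | GFP_DMA32, so the page allocator itself keeps the
 * buffer below 4GB.
 */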

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
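
/*
 * Illustrative sketch (hypothetical driver code, RING_BYTES is made
 * up): allocating a descriptor ring that CPU and device share
 * coherently, and freeing it with the same size and handle.
 *
 *	dma_addr_t ring_bus;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 */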

#endif