#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;

struct dma_mapping_ops {
	int             (*mapping_error)(struct device *dev,
					 dma_addr_t dma_addr);
	void*           (*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t gfp);
	void            (*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
				size_t size, int direction);
	void            (*unmap_single)(struct device *dev, dma_addr_t addr,
				size_t size, int direction);
	void            (*sync_single_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, size_t size,
				int direction);
	void            (*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size, int direction);
	void            (*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	void            (*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				int direction);
	int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
				int nents, int direction);
	void            (*unmap_sg)(struct device *hwdev,
				struct scatterlist *sg, int nents,
				int direction);
	int             (*dma_supported)(struct device *hwdev, u64 mask);
	int             is_phys;
};

extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

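/*
 * Illustrative sketch, not part of this header: on 64-bit an IOMMU backend
 * supplies its own struct dma_mapping_ops and installs it either globally
 * (dma_ops) or per device via dev->archdata.dma_ops; get_dma_ops() above
 * picks the per-device ops when they exist.  All names below are
 * hypothetical:
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */
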
/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
	return 0;
#else
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
#endif
}

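/*
 * Illustrative use, hypothetical driver code: on 64-bit the check is
 * delegated to the backend (or compared against bad_dma_address), so every
 * mapping result should be tested before the handle reaches hardware:
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, handle))
 *		return -ENOMEM;
 */
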
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_single)
		ops->unmap_single(dev, addr, size, direction);
}

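/*
 * Illustrative streaming-mapping lifecycle, hypothetical driver code
 * ("pdev", "buf" and "len" are assumptions): the buffer belongs to the
 * device between map and unmap, and both calls must use the same size and
 * direction:
 *
 *	dma_addr_t bus = dma_map_single(&pdev->dev, buf, len, DMA_FROM_DEVICE);
 *	if (dma_mapping_error(&pdev->dev, bus))
 *		return -ENOMEM;
 *	... start the device, wait for the transfer to complete ...
 *	dma_unmap_single(&pdev->dev, bus, len, DMA_FROM_DEVICE);
 */
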
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, direction);
}

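/*
 * Illustrative scatter/gather use, hypothetical driver code
 * ("program_hw_entry" is an assumption): the backend may merge entries, so
 * the driver walks only the count returned by dma_map_sg() (0 means
 * failure) but unmaps with the original nents:
 *
 *	struct scatterlist *sg;
 *	int i, count = dma_map_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 *
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw_entry(i, sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(&pdev->dev, sglist, nents, DMA_TO_DEVICE);
 */
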
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(direction));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, direction);

	flush_write_buffers();
}

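/*
 * Illustrative sync usage, hypothetical driver code: a buffer that stays
 * mapped across several transfers is handed back to the CPU before it is
 * read and returned to the device before the next DMA:
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 */
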
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      int direction)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(direction));
	return ops->map_single(dev, page_to_phys(page) + offset,
			       size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, int direction)
{
	dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
#ifdef CONFIG_X86_64
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

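/*
 * Worked example of the selection above (x86-64, values assumed): a device
 * whose coherent_dma_mask is DMA_32BIT_MASK gets GFP_DMA32 added so the
 * allocation stays below 4GB; a device with no coherent mask set is treated
 * as 32-bit capable, or 24-bit (below 16MB) if the caller already passed
 * GFP_DMA.
 */
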
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!dev->dma_mask)
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_mapping_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

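/*
 * Illustrative coherent allocation, hypothetical driver code ("RING_BYTES"
 * and "ring_bus" are assumptions): descriptor rings and other long-lived
 * structures shared with the device use the coherent API:
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_bus,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_bus);
 */
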
#endif