#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

struct dma_mapping_ops {
        int             (*mapping_error)(struct device *dev,
                                         dma_addr_t dma_addr);
        void            *(*alloc_coherent)(struct device *dev, size_t size,
                                dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, size_t size,
                                int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                dma_addr_t dma_handle, unsigned long offset,
                                size_t size, int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                struct scatterlist *sg, int nelems,
                                int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                struct scatterlist *sg, int nents,
                                int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};
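
/*
 * Illustrative sketch (not part of this header): a backend fills in the
 * hooks it implements and leaves the rest NULL. The inline wrappers
 * below check most hooks for NULL, but ->map_single and ->map_sg are
 * called unconditionally, so every backend must provide them.
 * Hypothetical example, loosely modelled on a no-IOMMU backend:
 *
 *	static struct dma_mapping_ops example_dma_ops = {
 *		.alloc_coherent	= dma_generic_alloc_coherent,
 *		.map_single	= example_map_single,
 *		.map_sg		= example_map_sg,
 *		.is_phys	= 1,
 *	};
 *
 *	dma_ops = &example_dma_ops;
 */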

extern struct dma_mapping_ops *dma_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}
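
/*
 * Illustrative note: on X86_64 a per-device ops table can override the
 * global one, e.g. an IOMMU driver doing (hypothetical name):
 *
 *	dev->archdata.dma_ops = &my_iommu_dma_ops;
 *
 * X86_32 has no such archdata field, so the global dma_ops is always
 * used there.
 */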

/*
 * Preserve the same behaviour as before this became generic: backends
 * without a ->mapping_error hook fall back to comparing against
 * bad_dma_address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
}
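
/*
 * Usage sketch (illustrative only): every streaming mapping should be
 * checked before the bus address is handed to hardware.
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */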

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
                                        dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_single)
                ops->unmap_single(dev, addr, size, direction);
}

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}
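
/*
 * Scatter/gather sketch (illustrative only): ->map_sg may coalesce
 * entries, so program the device with the returned count, not the
 * original nents; a return of 0 means the mapping failed. program_hw()
 * stands in for driver-specific code.
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	struct scatterlist *sg;
 *
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, sg, count, i)
 *		program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */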

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}
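
/*
 * Sync usage sketch (illustrative only): a buffer that stays mapped
 * across several transfers must be handed back and forth between the
 * CPU and the device:
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffer ...
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 *	... the device may now write it again ...
 */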

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(dev, page_to_phys(page) + offset,
                               size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}
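
/*
 * Illustrative note: dma_map_page() works from the struct page and its
 * physical address, so a highmem page can be mapped for DMA without
 * kmapping it first, e.g.
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 */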

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * No easy way to get cache size on all x86, so return the
         * maximum possible to be safe.
         */
        return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
                                                    gfp_t gfp)
{
        unsigned long dma_mask = 0;

        dma_mask = dev->coherent_dma_mask;
        if (!dma_mask)
                dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

        return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
        unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

        if (dma_mask <= DMA_24BIT_MASK)
                gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
        if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
                gfp |= GFP_DMA32;
#endif
        return gfp;
}
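
/*
 * Worked example: on X86_64, a device whose coherent_dma_mask is
 * DMA_32BIT_MASK gets GFP_DMA32 added, keeping the allocation below
 * 4GB; a 24-bit (ISA-style) mask gets GFP_DMA instead, keeping it
 * below 16MB.
 */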

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory;

        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (!is_device_dma_capable(dev))
                return NULL;

        if (!ops->alloc_coherent)
                return NULL;

        return ops->alloc_coherent(dev, size, dma_handle,
                                   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}
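
/*
 * Coherent allocation sketch (illustrative only): the returned CPU
 * pointer and bus handle refer to the same memory and need no sync
 * calls. ring_size is a driver-chosen length, named here only for
 * illustration.
 *
 *	dma_addr_t bus;
 *	void *desc = dma_alloc_coherent(dev, ring_size, &bus, GFP_KERNEL);
 *	if (!desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, ring_size, desc, bus);
 */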

#endif