blob: f82fdc412c64b9d3371fb1a9df65f38ba141bb52 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_DMA_MAPPING_H
2#define _ASM_X86_DMA_MAPPING_H
Glauber Costa6f536632008-03-25 18:36:20 -03003
/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */
8
9#include <linux/scatterlist.h>
Joerg Roedel2118d0c2009-01-09 15:13:15 +010010#include <linux/dma-debug.h>
FUJITA Tomonoriabe66022009-01-05 23:47:21 +090011#include <linux/dma-attrs.h>
Glauber Costa6f536632008-03-25 18:36:20 -030012#include <asm/io.h>
13#include <asm/swiotlb.h>
Joerg Roedel6c505ce2008-08-19 16:32:45 +020014#include <asm-generic/dma-coherent.h>
Glauber Costa6f536632008-03-25 18:36:20 -030015
Glauber Costa7c183412008-03-25 18:36:36 -030016extern dma_addr_t bad_dma_address;
Glauber Costab7107a32008-03-25 18:36:39 -030017extern int iommu_merge;
Joerg Roedel6c505ce2008-08-19 16:32:45 +020018extern struct device x86_dma_fallback_dev;
Glauber Costab7107a32008-03-25 18:36:39 -030019extern int panic_on_overflow;
Glauber Costa7c183412008-03-25 18:36:36 -030020
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090021extern struct dma_map_ops *dma_ops;
Glauber Costa6f536632008-03-25 18:36:20 -030022
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090023static inline struct dma_map_ops *get_dma_ops(struct device *dev)
Glauber Costac786df02008-03-25 18:36:37 -030024{
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -070025#ifdef CONFIG_X86_32
26 return dma_ops;
27#else
28 if (unlikely(!dev) || !dev->archdata.dma_ops)
29 return dma_ops;
30 else
31 return dev->archdata.dma_ops;
Jeremy Fitzhardingecfb80c92008-12-16 12:17:36 -080032#endif
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -070033}
34
35/* Make sure we keep the same behaviour */
36static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
37{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090038 struct dma_map_ops *ops = get_dma_ops(dev);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -070039 if (ops->mapping_error)
40 return ops->mapping_error(dev, dma_addr);
Glauber Costac786df02008-03-25 18:36:37 -030041
Thomas Bogendoerfer7b1dedc2008-11-29 13:46:27 +010042 return (dma_addr == bad_dma_address);
Glauber Costac786df02008-03-25 18:36:37 -030043}
44
Glauber Costa8d396de2008-03-25 18:36:31 -030045#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
46#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
Joerg Roedel6c505ce2008-08-19 16:32:45 +020047#define dma_is_consistent(d, h) (1)
Glauber Costa8d396de2008-03-25 18:36:31 -030048
Glauber Costa802c1f62008-03-25 18:36:34 -030049extern int dma_supported(struct device *hwdev, u64 mask);
50extern int dma_set_mask(struct device *dev, u64 mask);
51
FUJITA Tomonori9f6ac572008-09-24 20:48:35 +090052extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
53 dma_addr_t *dma_addr, gfp_t flag);
54
Glauber Costa22456b92008-03-25 18:36:21 -030055static inline dma_addr_t
56dma_map_single(struct device *hwdev, void *ptr, size_t size,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090057 enum dma_data_direction dir)
Glauber Costa22456b92008-03-25 18:36:21 -030058{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090059 struct dma_map_ops *ops = get_dma_ops(hwdev);
Joerg Roedel2118d0c2009-01-09 15:13:15 +010060 dma_addr_t addr;
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -070061
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090062 BUG_ON(!valid_dma_direction(dir));
Joerg Roedel2118d0c2009-01-09 15:13:15 +010063 addr = ops->map_page(hwdev, virt_to_page(ptr),
FUJITA Tomonorid7dff842009-01-05 23:47:28 +090064 (unsigned long)ptr & ~PAGE_MASK, size,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090065 dir, NULL);
Joerg Roedel2118d0c2009-01-09 15:13:15 +010066 debug_dma_map_page(hwdev, virt_to_page(ptr),
67 (unsigned long)ptr & ~PAGE_MASK, size,
68 dir, addr, true);
69 return addr;
Glauber Costa22456b92008-03-25 18:36:21 -030070}
71
Glauber Costa0cb0ae62008-03-25 18:36:22 -030072static inline void
73dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090074 enum dma_data_direction dir)
Glauber Costa0cb0ae62008-03-25 18:36:22 -030075{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090076 struct dma_map_ops *ops = get_dma_ops(dev);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -070077
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090078 BUG_ON(!valid_dma_direction(dir));
FUJITA Tomonorid7dff842009-01-05 23:47:28 +090079 if (ops->unmap_page)
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090080 ops->unmap_page(dev, addr, size, dir, NULL);
Joerg Roedel2118d0c2009-01-09 15:13:15 +010081 debug_dma_unmap_page(dev, addr, size, dir, true);
Glauber Costa0cb0ae62008-03-25 18:36:22 -030082}
83
Glauber Costa16a3ce92008-03-25 18:36:23 -030084static inline int
85dma_map_sg(struct device *hwdev, struct scatterlist *sg,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090086 int nents, enum dma_data_direction dir)
Glauber Costa16a3ce92008-03-25 18:36:23 -030087{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090088 struct dma_map_ops *ops = get_dma_ops(hwdev);
Joerg Roedel2118d0c2009-01-09 15:13:15 +010089 int ents;
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -070090
FUJITA Tomonori160c1d82009-01-05 23:59:02 +090091 BUG_ON(!valid_dma_direction(dir));
Joerg Roedel2118d0c2009-01-09 15:13:15 +010092 ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
93 debug_dma_map_sg(hwdev, sg, nents, ents, dir);
94
95 return ents;
Glauber Costa16a3ce92008-03-25 18:36:23 -030096}
Glauber Costa72c784f2008-03-25 18:36:24 -030097
98static inline void
99dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900100 enum dma_data_direction dir)
Glauber Costa72c784f2008-03-25 18:36:24 -0300101{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900102 struct dma_map_ops *ops = get_dma_ops(hwdev);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700103
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900104 BUG_ON(!valid_dma_direction(dir));
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100105 debug_dma_unmap_sg(hwdev, sg, nents, dir);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700106 if (ops->unmap_sg)
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900107 ops->unmap_sg(hwdev, sg, nents, dir, NULL);
Glauber Costa72c784f2008-03-25 18:36:24 -0300108}
Glauber Costac01dd8c2008-03-25 18:36:25 -0300109
110static inline void
111dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900112 size_t size, enum dma_data_direction dir)
Glauber Costac01dd8c2008-03-25 18:36:25 -0300113{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900114 struct dma_map_ops *ops = get_dma_ops(hwdev);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700115
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900116 BUG_ON(!valid_dma_direction(dir));
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700117 if (ops->sync_single_for_cpu)
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900118 ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100119 debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
Glauber Costac01dd8c2008-03-25 18:36:25 -0300120 flush_write_buffers();
121}
122
Glauber Costa9231b262008-03-25 18:36:26 -0300123static inline void
124dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900125 size_t size, enum dma_data_direction dir)
Glauber Costa9231b262008-03-25 18:36:26 -0300126{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900127 struct dma_map_ops *ops = get_dma_ops(hwdev);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700128
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900129 BUG_ON(!valid_dma_direction(dir));
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700130 if (ops->sync_single_for_device)
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900131 ops->sync_single_for_device(hwdev, dma_handle, size, dir);
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100132 debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
Glauber Costa9231b262008-03-25 18:36:26 -0300133 flush_write_buffers();
134}
135
Glauber Costa627610f2008-03-25 18:36:27 -0300136static inline void
137dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900138 unsigned long offset, size_t size,
139 enum dma_data_direction dir)
Glauber Costa627610f2008-03-25 18:36:27 -0300140{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900141 struct dma_map_ops *ops = get_dma_ops(hwdev);
Glauber Costa627610f2008-03-25 18:36:27 -0300142
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900143 BUG_ON(!valid_dma_direction(dir));
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700144 if (ops->sync_single_range_for_cpu)
145 ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900146 size, dir);
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100147 debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
148 offset, size, dir);
Glauber Costa627610f2008-03-25 18:36:27 -0300149 flush_write_buffers();
150}
Glauber Costa71362332008-03-25 18:36:28 -0300151
152static inline void
153dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
154 unsigned long offset, size_t size,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900155 enum dma_data_direction dir)
Glauber Costa71362332008-03-25 18:36:28 -0300156{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900157 struct dma_map_ops *ops = get_dma_ops(hwdev);
Glauber Costa71362332008-03-25 18:36:28 -0300158
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900159 BUG_ON(!valid_dma_direction(dir));
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700160 if (ops->sync_single_range_for_device)
161 ops->sync_single_range_for_device(hwdev, dma_handle,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900162 offset, size, dir);
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100163 debug_dma_sync_single_range_for_device(hwdev, dma_handle,
164 offset, size, dir);
Glauber Costa71362332008-03-25 18:36:28 -0300165 flush_write_buffers();
166}
167
Glauber Costaed435de2008-03-25 18:36:29 -0300168static inline void
169dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900170 int nelems, enum dma_data_direction dir)
Glauber Costaed435de2008-03-25 18:36:29 -0300171{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900172 struct dma_map_ops *ops = get_dma_ops(hwdev);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700173
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900174 BUG_ON(!valid_dma_direction(dir));
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700175 if (ops->sync_sg_for_cpu)
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900176 ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100177 debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
Glauber Costaed435de2008-03-25 18:36:29 -0300178 flush_write_buffers();
179}
Glauber Costae7f3a912008-03-25 18:36:30 -0300180
181static inline void
182dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900183 int nelems, enum dma_data_direction dir)
Glauber Costae7f3a912008-03-25 18:36:30 -0300184{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900185 struct dma_map_ops *ops = get_dma_ops(hwdev);
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700186
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900187 BUG_ON(!valid_dma_direction(dir));
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700188 if (ops->sync_sg_for_device)
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900189 ops->sync_sg_for_device(hwdev, sg, nelems, dir);
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100190 debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
Glauber Costae7f3a912008-03-25 18:36:30 -0300191
192 flush_write_buffers();
193}
Glauber Costa4d92fbf2008-03-25 18:36:32 -0300194
195static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
196 size_t offset, size_t size,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900197 enum dma_data_direction dir)
Glauber Costa4d92fbf2008-03-25 18:36:32 -0300198{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900199 struct dma_map_ops *ops = get_dma_ops(dev);
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100200 dma_addr_t addr;
FUJITA Tomonori8d8bb392008-07-25 19:44:49 -0700201
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900202 BUG_ON(!valid_dma_direction(dir));
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100203 addr = ops->map_page(dev, page, offset, size, dir, NULL);
204 debug_dma_map_page(dev, page, offset, size, dir, addr, false);
205
206 return addr;
Glauber Costa4d92fbf2008-03-25 18:36:32 -0300207}
208
209static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900210 size_t size, enum dma_data_direction dir)
Glauber Costa4d92fbf2008-03-25 18:36:32 -0300211{
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100212 struct dma_map_ops *ops = get_dma_ops(dev);
213
214 BUG_ON(!valid_dma_direction(dir));
215 if (ops->unmap_page)
216 ops->unmap_page(dev, addr, size, dir, NULL);
217 debug_dma_unmap_page(dev, addr, size, dir, false);
Glauber Costa4d92fbf2008-03-25 18:36:32 -0300218}
219
/*
 * dma_cache_sync - make a non-coherent mapping consistent
 *
 * On x86 dma_alloc_noncoherent() memory is consistent (see
 * dma_is_consistent above), so no cache maintenance is required;
 * draining the CPU write buffers is all that is done here.
 */
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}
Glauber Costaae17a63b2008-03-25 18:36:38 -0300226
/*
 * Report the alignment DMA buffers should honour.  There is no cheap,
 * uniform way to query the cache line size across all x86 variants,
 * so the CLFLUSH block size is returned as a safe upper bound.
 */
static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}
233
FUJITA Tomonori823e7e82008-09-08 18:10:13 +0900234static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
235 gfp_t gfp)
236{
237 unsigned long dma_mask = 0;
Glauber Costab7107a32008-03-25 18:36:39 -0300238
FUJITA Tomonori823e7e82008-09-08 18:10:13 +0900239 dma_mask = dev->coherent_dma_mask;
240 if (!dma_mask)
Yang Hongyang2f4f27d2009-04-06 19:01:18 -0700241 dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);
FUJITA Tomonori823e7e82008-09-08 18:10:13 +0900242
243 return dma_mask;
244}
245
246static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
247{
248 unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);
249
Yang Hongyang2f4f27d2009-04-06 19:01:18 -0700250 if (dma_mask <= DMA_BIT_MASK(24))
FUJITA Tomonori75bebb72008-10-23 20:46:55 +0900251 gfp |= GFP_DMA;
252#ifdef CONFIG_X86_64
Yang Hongyang284901a2009-04-06 19:01:15 -0700253 if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
FUJITA Tomonori823e7e82008-09-08 18:10:13 +0900254 gfp |= GFP_DMA32;
255#endif
256 return gfp;
257}
258
Joerg Roedel6c505ce2008-08-19 16:32:45 +0200259static inline void *
260dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
261 gfp_t gfp)
262{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900263 struct dma_map_ops *ops = get_dma_ops(dev);
Joerg Roedel6c505ce2008-08-19 16:32:45 +0200264 void *memory;
Glauber Costaae17a63b2008-03-25 18:36:38 -0300265
FUJITA Tomonori8a53ad62008-09-08 18:10:12 +0900266 gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);
267
Joerg Roedel6c505ce2008-08-19 16:32:45 +0200268 if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
269 return memory;
270
271 if (!dev) {
272 dev = &x86_dma_fallback_dev;
273 gfp |= GFP_DMA;
274 }
275
FUJITA Tomonori98216262008-09-10 00:49:48 +0900276 if (!is_device_dma_capable(dev))
FUJITA Tomonoride9f5212008-09-08 18:10:11 +0900277 return NULL;
278
FUJITA Tomonori823e7e82008-09-08 18:10:13 +0900279 if (!ops->alloc_coherent)
280 return NULL;
281
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100282 memory = ops->alloc_coherent(dev, size, dma_handle,
283 dma_alloc_coherent_gfp_flags(dev, gfp));
284 debug_dma_alloc_coherent(dev, size, *dma_handle, memory);
285
286 return memory;
Joerg Roedel6c505ce2008-08-19 16:32:45 +0200287}
288
289static inline void dma_free_coherent(struct device *dev, size_t size,
290 void *vaddr, dma_addr_t bus)
291{
FUJITA Tomonori160c1d82009-01-05 23:59:02 +0900292 struct dma_map_ops *ops = get_dma_ops(dev);
Joerg Roedel6c505ce2008-08-19 16:32:45 +0200293
294 WARN_ON(irqs_disabled()); /* for portability */
295
296 if (dma_release_from_coherent(dev, get_order(size), vaddr))
297 return;
298
Joerg Roedel2118d0c2009-01-09 15:13:15 +0100299 debug_dma_free_coherent(dev, size, vaddr, bus);
Joerg Roedel6c505ce2008-08-19 16:32:45 +0200300 if (ops->free_coherent)
301 ops->free_coherent(dev, size, vaddr, bus);
302}
303
Glauber Costa6f536632008-03-25 18:36:20 -0300304#endif