#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/kmemcheck.h>
#include <linux/scatterlist.h>
#include <linux/dma-debug.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;
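
/*
 * On x86-64 a device may carry its own dma_map_ops in
 * dev->archdata.dma_ops (e.g. when an IOMMU handles only some
 * devices); everything else falls back to the global dma_ops table.
 * x86-32 always uses the global table.
 */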
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/* Make sure we keep the same behaviour */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}
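
/*
 * Every address returned by dma_map_single()/dma_map_page() should be
 * checked before use.  A minimal sketch (dev, buf and len are
 * hypothetical caller state):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -EIO;
 */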

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	kmemcheck_mark_initialized(ptr, size);
	addr = ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
	debug_dma_map_page(hwdev, virt_to_page(ptr),
			   (unsigned long)ptr & ~PAGE_MASK, size,
			   dir, addr, true);
	return addr;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, true);
}
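
/*
 * Streaming-mapping lifecycle, as a minimal sketch (hw_start_dma() is
 * a hypothetical helper that programs the device with the bus
 * address):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		goto err;
 *	hw_start_dma(bus, len);
 *	... wait for completion ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 *
 * The size and direction passed to dma_unmap_single() must match the
 * mapping call.
 */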

static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);
	int ents;
	struct scatterlist *s;
	int i;

	BUG_ON(!valid_dma_direction(dir));
	for_each_sg(sg, s, nents, i)
		kmemcheck_mark_initialized(sg_virt(s), s->length);
	ents = ops->map_sg(hwdev, sg, nents, dir, NULL);
	debug_dma_map_sg(hwdev, sg, nents, ents, dir);

	return ents;
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	debug_dma_unmap_sg(hwdev, sg, nents, dir);
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}
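
/*
 * dma_map_sg() may coalesce entries (e.g. through an IOMMU) and can
 * return fewer DMA segments than the nents passed in; always iterate
 * over the returned count.  A minimal sketch (sglist, hw_add_segment()
 * and the s/i iterators are hypothetical):
 *
 *	int count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (!count)
 *		goto err;
 *	for_each_sg(sglist, s, count, i)
 *		hw_add_segment(sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the returned
 * count.
 */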

static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_cpu(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
	debug_dma_sync_single_for_device(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}
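
/*
 * If the CPU needs to look at a buffer while it stays mapped, bracket
 * the access with the sync pair.  A minimal sketch for a buffer the
 * device writes into (bus, buf and len are hypothetical; examine() is
 * a hypothetical consumer):
 *
 *	dma_sync_single_for_cpu(dev, bus, len, DMA_FROM_DEVICE);
 *	examine(buf);		(the CPU owns the buffer here)
 *	dma_sync_single_for_device(dev, bus, len, DMA_FROM_DEVICE);
 */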

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, dir);
	debug_dma_sync_single_range_for_cpu(hwdev, dma_handle,
					    offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, dir);
	debug_dma_sync_single_range_for_device(hwdev, dma_handle,
					       offset, size, dir);
	flush_write_buffers();
}
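
/*
 * The _range variants sync only [offset, offset + size) within a
 * mapping, which is cheaper when e.g. only a packet header needs to
 * be inspected.  Sketch (hdr_len is hypothetical):
 *
 *	dma_sync_single_range_for_cpu(dev, bus, 0, hdr_len,
 *				      DMA_FROM_DEVICE);
 */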

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(hwdev, sg, nelems, dir);
	flush_write_buffers();
}
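
/*
 * As with the single-buffer variants, pass the same nelems that was
 * handed to dma_map_sg(), and bracket any CPU access with the
 * for_cpu/for_device pair.
 */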

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	dma_addr_t addr;

	BUG_ON(!valid_dma_direction(dir));
	kmemcheck_mark_initialized(page_address(page) + offset, size);
	addr = ops->map_page(dev, page, offset, size, dir, NULL);
	debug_dma_map_page(dev, page, offset, size, dir, addr, false);

	return addr;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
	debug_dma_unmap_page(dev, addr, size, dir, false);
}
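
/*
 * dma_map_page() is the page/offset form of dma_map_single() (which,
 * as above, is itself implemented via ops->map_page); it is the one to
 * use for highmem pages, which have no permanent kernel virtual
 * address.  Sketch:
 *
 *	dma_addr_t bus = dma_map_page(dev, page, 0, PAGE_SIZE,
 *				      DMA_TO_DEVICE);
 */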

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/*
	 * No easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe.
	 */
	return boot_cpu_data.x86_clflush_size;
}

static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_BIT_MASK(24) : DMA_BIT_MASK(32);

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_BIT_MASK(24))
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_BIT_MASK(32) && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}
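
/*
 * Worked example of the mask-to-GFP translation above: a device with
 * coherent_dma_mask = DMA_BIT_MASK(32) gets plain GFP flags on 32-bit
 * but GFP_DMA32 on x86-64, keeping the allocation below 4GB; a 24-bit
 * ISA-style mask forces GFP_DMA on both.
 */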

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	memory = ops->alloc_coherent(dev, size, dma_handle,
				     dma_alloc_coherent_gfp_flags(dev, gfp));
	debug_dma_alloc_coherent(dev, size, *dma_handle, memory);

	return memory;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	debug_dma_free_coherent(dev, size, vaddr, bus);
	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}
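
/*
 * Coherent allocations suit long-lived structures that both CPU and
 * device touch, such as descriptor rings.  A minimal sketch
 * (RING_BYTES is a hypothetical driver constant):
 *
 *	dma_addr_t ring_bus;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_bus,
 *					GFP_KERNEL);
 *	if (!ring)
 *		goto err;
 *	...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_bus);
 */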

#endif /* _ASM_X86_DMA_MAPPING_H */