#ifndef _ASM_X86_DMA_MAPPING_H
#define _ASM_X86_DMA_MAPPING_H

/*
 * IOMMU interface. See Documentation/PCI/PCI-DMA-mapping.txt and
 * Documentation/DMA-API.txt for documentation.
 */

#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;

extern struct dma_map_ops *dma_ops;

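/*
 * Per-device dispatch for the DMA primitives below.  On 32-bit x86 there
 * is a single global implementation; on 64-bit a device may carry its
 * own ops in dev->archdata.dma_ops (e.g. when it sits behind an IOMMU),
 * with the global dma_ops as the fallback.
 */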
static inline struct dma_map_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
	return dma_ops;
#else
	if (unlikely(!dev) || !dev->archdata.dma_ops)
		return dma_ops;
	else
		return dev->archdata.dma_ops;
#endif
}

/*
 * Make sure we keep the same behaviour: implementations without a
 * mapping_error hook fall back to comparing against bad_dma_address.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	if (ops->mapping_error)
		return ops->mapping_error(dev, dma_addr);

	return (dma_addr == bad_dma_address);
}

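/*
 * x86 RAM is cache-coherent with respect to DMA, so the "noncoherent"
 * API simply aliases the coherent one and dma_is_consistent() is
 * unconditionally true.
 */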
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

extern void *dma_generic_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_addr, gfp_t flag);

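/*
 * Streaming mappings: dma_map_single() hands a kernel-virtual buffer to
 * the device and returns the bus address to program into it;
 * dma_unmap_single() gives the buffer back to the CPU.  A minimal usage
 * sketch ('dev', 'buf' and 'len' are a hypothetical driver's state):
 *
 *	dma_addr_t bus = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, bus))
 *		return -ENOMEM;
 *	... point the hardware at 'bus' and start the transfer ...
 *	dma_unmap_single(dev, bus, len, DMA_TO_DEVICE);
 */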
static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
	       enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	return ops->map_page(hwdev, virt_to_page(ptr),
			     (unsigned long)ptr & ~PAGE_MASK, size,
			     dir, NULL);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
		 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_page)
		ops->unmap_page(dev, addr, size, dir, NULL);
}

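/*
 * Scatter-gather variants: map or unmap all @nents entries of @sg in
 * one call.  dma_map_sg() returns the number of DMA segments actually
 * used (an IOMMU may merge adjacent entries) or 0 on failure.
 */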
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
	   int nents, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	return ops->map_sg(hwdev, sg, nents, dir, NULL);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
	     enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->unmap_sg)
		ops->unmap_sg(hwdev, sg, nents, dir, NULL);
}

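/*
 * Ownership handoff for streaming mappings: the sync_*_for_cpu helpers
 * make device-written data visible to the CPU, and the sync_*_for_device
 * helpers hand the buffer back before the device touches it again.
 * Cache-coherent implementations may leave these hooks NULL, in which
 * case only the CPU write buffers are flushed.
 */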
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
			   size_t size, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_for_device)
		ops->sync_single_for_device(hwdev, dma_handle, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_cpu)
		ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
					       size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_single_range_for_device)
		ops->sync_single_range_for_device(hwdev, dma_handle,
						  offset, size, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
		    int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(hwdev, sg, nelems, dir);
	flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
		       int nelems, enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(hwdev);

	BUG_ON(!valid_dma_direction(dir));
	if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(hwdev, sg, nelems, dir);

	flush_write_buffers();
}

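/*
 * Page-based mapping for buffers that may live in highmem and so have
 * no permanent kernel-virtual address; dma_map_single() above is the
 * virt_to_page() wrapper around the same ops->map_page hook.
 */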
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      size_t offset, size_t size,
				      enum dma_data_direction dir)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	return ops->map_page(dev, page, offset, size, dir, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
				  size_t size, enum dma_data_direction dir)
{
	dma_unmap_single(dev, addr, size, dir);
}

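/*
 * x86 DMA is cache-coherent, so syncing a consistent buffer reduces to
 * flushing the CPU write buffers.
 */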
static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction dir)
{
	flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all x86, so return the
	 * maximum possible, to be safe */
	return boot_cpu_data.x86_clflush_size;
}

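/*
 * Helpers for dma_alloc_coherent(): derive the effective coherent mask
 * for @dev (falling back to a 24- or 32-bit mask when none is set) and
 * widen @gfp with GFP_DMA/GFP_DMA32 so the allocation lands in a zone
 * the device can address.
 */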
static inline unsigned long dma_alloc_coherent_mask(struct device *dev,
						    gfp_t gfp)
{
	unsigned long dma_mask = 0;

	dma_mask = dev->coherent_dma_mask;
	if (!dma_mask)
		dma_mask = (gfp & GFP_DMA) ? DMA_24BIT_MASK : DMA_32BIT_MASK;

	return dma_mask;
}

static inline gfp_t dma_alloc_coherent_gfp_flags(struct device *dev, gfp_t gfp)
{
	unsigned long dma_mask = dma_alloc_coherent_mask(dev, gfp);

	if (dma_mask <= DMA_24BIT_MASK)
		gfp |= GFP_DMA;
#ifdef CONFIG_X86_64
	if (dma_mask <= DMA_32BIT_MASK && !(gfp & GFP_DMA))
		gfp |= GFP_DMA32;
#endif
	return gfp;
}

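/*
 * Allocation order: try a per-device coherent pool first, substitute the
 * x86 fallback device for a NULL @dev, and finally dispatch to the
 * active dma_map_ops with zone flags derived from the coherent mask.
 */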
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t gfp)
{
	struct dma_map_ops *ops = get_dma_ops(dev);
	void *memory;

	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM | __GFP_DMA32);

	if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
		return memory;

	if (!dev) {
		dev = &x86_dma_fallback_dev;
		gfp |= GFP_DMA;
	}

	if (!is_device_dma_capable(dev))
		return NULL;

	if (!ops->alloc_coherent)
		return NULL;

	return ops->alloc_coherent(dev, size, dma_handle,
				   dma_alloc_coherent_gfp_flags(dev, gfp));
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t bus)
{
	struct dma_map_ops *ops = get_dma_ops(dev);

	WARN_ON(irqs_disabled());	/* for portability */

	if (dma_release_from_coherent(dev, get_order(size), vaddr))
		return;

	if (ops->free_coherent)
		ops->free_coherent(dev, size, vaddr, bus);
}

#endif