#ifndef _ASM_DMA_MAPPING_H_
#define _ASM_DMA_MAPPING_H_

/*
 * IOMMU interface. See Documentation/DMA-mapping.txt and DMA-API.txt for
 * documentation.
 */

#include <linux/scatterlist.h>
#include <asm/io.h>
#include <asm/swiotlb.h>
#include <asm-generic/dma-coherent.h>

extern dma_addr_t bad_dma_address;
extern int iommu_merge;
extern struct device x86_dma_fallback_dev;
extern int panic_on_overflow;
extern int force_iommu;

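/*
 * Per-backend DMA operations.  A DMA implementation (nommu, swiotlb, a
 * hardware IOMMU, ...) fills in one of these and either installs it as
 * the global dma_ops or attaches it to a device via
 * dev->archdata.dma_ops (x86_64 only).  map_single and map_sg are
 * called unconditionally by the wrappers below and must be provided;
 * the remaining hooks may be left NULL, in which case the wrappers
 * fall back to a default or simply skip the call.
 */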
struct dma_mapping_ops {
        int             (*mapping_error)(struct device *dev,
                                         dma_addr_t dma_addr);
        void            *(*alloc_coherent)(struct device *dev, size_t size,
                                           dma_addr_t *dma_handle, gfp_t gfp);
        void            (*free_coherent)(struct device *dev, size_t size,
                                         void *vaddr, dma_addr_t dma_handle);
        dma_addr_t      (*map_single)(struct device *hwdev, phys_addr_t ptr,
                                      size_t size, int direction);
        /* like map_single, but doesn't check the device mask */
        dma_addr_t      (*map_simple)(struct device *hwdev, phys_addr_t ptr,
                                      size_t size, int direction);
        void            (*unmap_single)(struct device *dev, dma_addr_t addr,
                                        size_t size, int direction);
        void            (*sync_single_for_cpu)(struct device *hwdev,
                                               dma_addr_t dma_handle,
                                               size_t size, int direction);
        void            (*sync_single_for_device)(struct device *hwdev,
                                                  dma_addr_t dma_handle,
                                                  size_t size, int direction);
        void            (*sync_single_range_for_cpu)(struct device *hwdev,
                                                     dma_addr_t dma_handle,
                                                     unsigned long offset,
                                                     size_t size, int direction);
        void            (*sync_single_range_for_device)(struct device *hwdev,
                                                        dma_addr_t dma_handle,
                                                        unsigned long offset,
                                                        size_t size,
                                                        int direction);
        void            (*sync_sg_for_cpu)(struct device *hwdev,
                                           struct scatterlist *sg, int nelems,
                                           int direction);
        void            (*sync_sg_for_device)(struct device *hwdev,
                                              struct scatterlist *sg,
                                              int nelems, int direction);
        int             (*map_sg)(struct device *hwdev, struct scatterlist *sg,
                                  int nents, int direction);
        void            (*unmap_sg)(struct device *hwdev,
                                    struct scatterlist *sg, int nents,
                                    int direction);
        int             (*dma_supported)(struct device *hwdev, u64 mask);
        int             is_phys;
};

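/*
 * Illustrative sketch (not part of this header) of how a backend might
 * register itself; the mydev_* names are hypothetical:
 *
 *      static struct dma_mapping_ops mydev_dma_ops = {
 *              .map_single     = mydev_map_single,
 *              .unmap_single   = mydev_unmap_single,
 *              .map_sg         = mydev_map_sg,
 *              .unmap_sg       = mydev_unmap_sg,
 *      };
 *
 *      void __init mydev_iommu_init(void)
 *      {
 *              dma_ops = &mydev_dma_ops;  (becomes the global fallback)
 *      }
 */
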
extern struct dma_mapping_ops *dma_ops;

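/*
 * On x86_64 a device may carry private operations in
 * dev->archdata.dma_ops; a NULL device, or one without private ops,
 * falls back to the global table.  32-bit always uses the global
 * dma_ops.
 */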
static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
#ifdef CONFIG_X86_32
        return dma_ops;
#else
        if (unlikely(!dev) || !dev->archdata.dma_ops)
                return dma_ops;
        else
                return dev->archdata.dma_ops;
#endif
}

/*
 * Keep the pre-unification behaviour of both configurations: 32-bit
 * never reported mapping failures, so always return success there;
 * 64-bit asks the backend's mapping_error hook if it has one, and
 * otherwise compares against the bad_dma_address sentinel.
 */
static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_X86_32
        return 0;
#else
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        if (ops->mapping_error)
                return ops->mapping_error(dev, dma_addr);

        return (dma_addr == bad_dma_address);
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h) (1)

extern int dma_supported(struct device *hwdev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 mask);

static inline dma_addr_t
dma_map_single(struct device *hwdev, void *ptr, size_t size,
               int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(hwdev, virt_to_phys(ptr), size, direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t addr, size_t size,
                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_single)
                ops->unmap_single(dev, addr, size, direction);
}

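/*
 * Typical streaming-DMA usage (illustrative only; "buf", "len" and the
 * error path belong to the caller):
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      if (dma_mapping_error(dev, handle))
 *              return -ENOMEM;
 *      ... point the hardware at "handle" and run the transfer ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */
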
static inline int
dma_map_sg(struct device *hwdev, struct scatterlist *sg,
           int nents, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_sg(hwdev, sg, nents, direction);
}

static inline void
dma_unmap_sg(struct device *hwdev, struct scatterlist *sg, int nents,
             int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->unmap_sg)
                ops->unmap_sg(hwdev, sg, nents, direction);
}

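/*
 * Illustrative scatter-gather usage; "sglist" setup is the caller's and
 * program_hw_entry() is a hypothetical driver helper.  An IOMMU may
 * coalesce entries, so iterate over the value map_sg returns (0 means
 * failure), but unmap with the original nents:
 *
 *      struct scatterlist *s;
 *      int i, count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *
 *      if (!count)
 *              return -ENOMEM;
 *      for_each_sg(sglist, s, count, i)
 *              program_hw_entry(i, sg_dma_address(s), sg_dma_len(s));
 *      ...
 *      dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */
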
static inline void
dma_sync_single_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                        size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_cpu)
                ops->sync_single_for_cpu(hwdev, dma_handle, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_for_device(struct device *hwdev, dma_addr_t dma_handle,
                           size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_for_device)
                ops->sync_single_for_device(hwdev, dma_handle, size,
                                            direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_cpu(struct device *hwdev, dma_addr_t dma_handle,
                              unsigned long offset, size_t size, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_cpu)
                ops->sync_single_range_for_cpu(hwdev, dma_handle, offset,
                                               size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_single_range_for_device(struct device *hwdev, dma_addr_t dma_handle,
                                 unsigned long offset, size_t size,
                                 int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_single_range_for_device)
                ops->sync_single_range_for_device(hwdev, dma_handle,
                                                  offset, size, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_cpu(struct device *hwdev, struct scatterlist *sg,
                    int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_cpu)
                ops->sync_sg_for_cpu(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

static inline void
dma_sync_sg_for_device(struct device *hwdev, struct scatterlist *sg,
                       int nelems, int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(hwdev);

        BUG_ON(!valid_dma_direction(direction));
        if (ops->sync_sg_for_device)
                ops->sync_sg_for_device(hwdev, sg, nelems, direction);
        flush_write_buffers();
}

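/*
 * Rule of thumb for the sync helpers above (illustrative): while a
 * streaming mapping stays alive, call dma_sync_*_for_cpu() before the
 * CPU reads a buffer the device has written, and dma_sync_*_for_device()
 * after the CPU has written a buffer the device will read, e.g.:
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      ... CPU inspects the data ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 */
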
static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
                                      size_t offset, size_t size,
                                      int direction)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        BUG_ON(!valid_dma_direction(direction));
        return ops->map_single(dev, page_to_phys(page) + offset,
                               size, direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t addr,
                                  size_t size, int direction)
{
        dma_unmap_single(dev, addr, size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
               enum dma_data_direction dir)
{
        flush_write_buffers();
}

static inline int dma_get_cache_alignment(void)
{
        /*
         * No easy way to get the cache size on all x86 variants, so
         * return the maximum possible, to be safe.
         */
        return boot_cpu_data.x86_clflush_size;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
                   gfp_t gfp)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);
        void *memory;

        if (dma_alloc_from_coherent(dev, size, dma_handle, &memory))
                return memory;

        if (!dev) {
                dev = &x86_dma_fallback_dev;
                gfp |= GFP_DMA;
        }

        if (ops->alloc_coherent)
                return ops->alloc_coherent(dev, size,
                                           dma_handle, gfp);
        return NULL;
}

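/*
 * Illustrative coherent-allocation usage; RING_BYTES is a hypothetical
 * constant.  A coherent buffer needs no explicit sync calls:
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *                                      GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ...
 *      dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 *
 * Note that dma_free_coherent() must not be called with interrupts
 * disabled (see the WARN_ON below).
 */
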
static inline void dma_free_coherent(struct device *dev, size_t size,
                                     void *vaddr, dma_addr_t bus)
{
        struct dma_mapping_ops *ops = get_dma_ops(dev);

        WARN_ON(irqs_disabled());       /* for portability */

        if (dma_release_from_coherent(dev, get_order(size), vaddr))
                return;

        if (ops->free_coherent)
                ops->free_coherent(dev, size, vaddr, bus);
}

#endif /* _ASM_DMA_MAPPING_H_ */