#ifndef _ASM_SPARC64_DMA_MAPPING_H
#define _ASM_SPARC64_DMA_MAPPING_H

#ifdef CONFIG_PCI

/* we implement the API below in terms of the existing PCI one,
 * so include it */
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>

#include <asm/of_device.h>

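/*
 * Everything in this CONFIG_PCI branch simply forwards to the
 * established pci_* DMA API, translating the generic struct device
 * back to its PCI device with to_pci_dev().  An illustrative sketch
 * (assumed driver code, not part of this header):
 *
 *	dma_addr_t h = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *
 * ends up as
 *
 *	pci_map_single(pdev, buf, len, PCI_DMA_TODEVICE);
 *
 * where "pdev" is an assumed struct pci_dev pointer; the enum
 * dma_data_direction values match the PCI_DMA_* constants, which is
 * why the wrappers below can just cast the direction to int.
 */
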
static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}

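/*
 * Illustrative probe-time use (an assumed-driver sketch, not from this
 * file):
 *
 *	if (dma_set_mask(&pdev->dev, DMA_32BIT_MASK))
 *		return -ENODEV;
 *
 * On sparc64 this just forwards to pci_set_dma_mask();
 * DMA_32BIT_MASK comes from <linux/dma-mapping.h>.
 */
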
static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   gfp_t flag)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_iommu_ops->alloc_consistent(to_pci_dev(dev), size, dma_handle, flag);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}

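/*
 * Coherent-allocation sketch (assumed driver code): a descriptor ring
 * that both CPU and device access without explicit syncing:
 *
 *	void *ring;
 *	dma_addr_t ring_dma;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_BYTES, &ring_dma,
 *				  GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&pdev->dev, RING_BYTES, ring, ring_dma);
 *
 * RING_BYTES is a hypothetical driver constant.
 */
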
static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}

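/*
 * Streaming-DMA sketch (assumed driver code): map a buffer for one
 * transfer, check the result with dma_mapping_error() (defined below),
 * and unmap once the transfer completes:
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(&pdev->dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(busaddr))
 *		return -ENOMEM;
 *	...
 *	dma_unmap_single(&pdev->dev, busaddr, len, DMA_TO_DEVICE);
 */
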
static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

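/*
 * Page-mapping sketch (assumed driver code), e.g. for a page obtained
 * with alloc_page():
 *
 *	busaddr = dma_map_page(&pdev->dev, page, 0, PAGE_SIZE,
 *			       DMA_FROM_DEVICE);
 *	...
 *	dma_unmap_page(&pdev->dev, busaddr, PAGE_SIZE, DMA_FROM_DEVICE);
 */
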
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}

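/*
 * Scatter-gather sketch (assumed driver code).  The IOMMU may coalesce
 * entries, so the count returned by dma_map_sg(), not the original
 * nents, governs the loop, while the original nents is what goes back
 * to dma_unmap_sg():
 *
 *	count = dma_map_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 *	for (i = 0; i < count; i++)
 *		fill_desc(sg_dma_address(&sglist[i]),
 *			  sg_dma_len(&sglist[i]));
 *	...
 *	dma_unmap_sg(&pdev->dev, sglist, nents, DMA_FROM_DEVICE);
 *
 * fill_desc() is a hypothetical device-specific helper.
 */
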
static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
				    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
				       size, (int)direction);
}

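/*
 * Sync sketch (assumed driver code): give the CPU a consistent view of
 * a long-lived mapping before reading it, then hand ownership back to
 * the device:
 *
 *	dma_sync_single_for_cpu(&pdev->dev, busaddr, len, DMA_FROM_DEVICE);
 *	inspect_buffer(buf);
 *	dma_sync_single_for_device(&pdev->dev, busaddr, len, DMA_FROM_DEVICE);
 *
 * inspect_buffer() is a hypothetical consumer of the data.
 */
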
static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return pci_dma_mapping_error(dma_addr);
}

#else

struct device;
struct page;
struct scatterlist;

static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG();
	return 0;
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG();
	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	BUG();
	return NULL;
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *vaddr, dma_addr_t dma_handle)
{
	BUG();
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG();
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	BUG();
	return 0;
}

#endif /* PCI */

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d, h)	(1)

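/*
 * sparc64 DMA is cache-coherent, so dma_is_consistent() is
 * unconditionally true and a "noncoherent" allocation can simply be
 * served by the coherent allocator.
 */
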
static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
}

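/*
 * Illustrative use (assumed driver code): round a structure size up so
 * consecutive DMA buffers never share a cache line:
 *
 *	size_t sz = ALIGN(sizeof(struct my_desc), dma_get_cache_alignment());
 *
 * struct my_desc is a hypothetical device descriptor type; ALIGN() is
 * the standard kernel macro from <linux/kernel.h>.
 */
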
static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* the pci API has no partial-sync primitive, so sync from the
	 * start of the mapping through offset+size, which covers the
	 * requested range */
	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* the pci API has no partial-sync primitive, so sync from the
	 * start of the mapping through offset+size, which covers the
	 * requested range */
	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}

static inline void
dma_cache_sync(struct device *dev, void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	/* could define this in terms of the dma_cache ... operations,
	 * but if you get this on a platform, you should convert the platform
	 * to using the generic device DMA API */
	BUG();
}

#endif /* _ASM_SPARC64_DMA_MAPPING_H */