/* Copyright (C) 2002 by James.Bottomley@HansenPartnership.com
 *
 * Implements the generic device dma API via the existing pci_ one
 * for unconverted architectures
 */

#ifndef _ASM_GENERIC_DMA_MAPPING_H
#define _ASM_GENERIC_DMA_MAPPING_H

#include <linux/config.h>

#ifdef CONFIG_PCI

/* we implement the API below in terms of the existing PCI one,
 * so include it */
#include <linux/pci.h>
/* need struct page definitions */
#include <linux/mm.h>

static inline int
dma_supported(struct device *dev, u64 mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_dma_supported(to_pci_dev(dev), mask);
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_set_dma_mask(to_pci_dev(dev), dma_mask);
}
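
/*
 * Illustrative sketch, not part of this header: a driver typically
 * negotiates its DMA mask once at probe time through the two helpers
 * above.  "mydev" and the 32-bit mask below are hypothetical; the error
 * handling is the driver's choice.
 *
 *	if (dma_set_mask(&mydev->dev, 0xffffffffULL))
 *		return -EIO;	(device cannot address the memory we would use)
 */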

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   unsigned int __nocast flag)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_alloc_consistent(to_pci_dev(dev), size, dma_handle);
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_free_consistent(to_pci_dev(dev), size, cpu_addr, dma_handle);
}
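
/*
 * Illustrative sketch, not part of this header: a coherent ("consistent")
 * buffer stays mapped for the lifetime of the driver and needs no explicit
 * sync calls.  The names ring, ring_dma and ring_size are hypothetical.
 *
 *	void *ring = dma_alloc_coherent(dev, ring_size, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...hand ring_dma to the hardware, access ring from the CPU...
 *	dma_free_coherent(dev, ring_size, ring, ring_dma);
 *
 * Note that the gfp flag is accepted only for API compatibility and is
 * ignored here, since pci_alloc_consistent() takes no gfp argument.
 */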

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_single(to_pci_dev(dev), cpu_addr, size, (int)direction);
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_single(to_pci_dev(dev), dma_addr, size, (int)direction);
}
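
/*
 * Illustrative sketch, not part of this header: a streaming mapping is set
 * up around a single transfer and should be checked with
 * dma_mapping_error() before the handle is used.  "buf", "len" and "dev"
 * are hypothetical.
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	...point the hardware at handle and run the transfer...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */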

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_page(to_pci_dev(dev), page, offset, size, (int)direction);
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_page(to_pci_dev(dev), dma_address, size, (int)direction);
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	return pci_map_sg(to_pci_dev(dev), sg, nents, (int)direction);
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_unmap_sg(to_pci_dev(dev), sg, nhwentries, (int)direction);
}
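
/*
 * Illustrative sketch, not part of this header: dma_map_sg() may coalesce
 * entries, so the hardware must be programmed with the count it returns,
 * while dma_unmap_sg() still takes the original nents.  "sglist", "nents"
 * and setup_hw_segment() are hypothetical.
 *
 *	int i, count = dma_map_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for (i = 0; i < count; i++)
 *		setup_hw_segment(sg_dma_address(&sglist[i]),
 *				 sg_dma_len(&sglist[i]));
 *	...run the transfer...
 *	dma_unmap_sg(dev, sglist, nents, DMA_FROM_DEVICE);
 */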

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_cpu(to_pci_dev(dev), dma_handle,
				    size, (int)direction);
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_single_for_device(to_pci_dev(dev), dma_handle,
				       size, (int)direction);
}
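
/*
 * Illustrative sketch, not part of this header: if the CPU needs to look at
 * a streaming buffer that stays mapped, ownership has to be passed back and
 * forth explicitly.  "handle", "len" and "buf" are hypothetical.
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	...the CPU may now read buf safely...
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *	...the device may DMA into the buffer again...
 */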

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_cpu(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG_ON(dev->bus != &pci_bus_type);

	pci_dma_sync_sg_for_device(to_pci_dev(dev), sg, nelems, (int)direction);
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return pci_dma_mapping_error(dma_addr);
}

#else

static inline int
dma_supported(struct device *dev, u64 mask)
{
	return 0;
}

static inline int
dma_set_mask(struct device *dev, u64 dma_mask)
{
	BUG();
	return 0;
}

static inline void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *dma_handle,
		   unsigned int __nocast flag)
{
	BUG();
	return NULL;
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		  dma_addr_t dma_handle)
{
	BUG();
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *cpu_addr, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
		 enum dma_data_direction direction)
{
	BUG();
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	BUG();
	return 0;
}

static inline void
dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	     enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle, size_t size,
			enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle, size_t size,
			   enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
		    enum dma_data_direction direction)
{
	BUG();
}

static inline void
dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
		       enum dma_data_direction direction)
{
	BUG();
}

static inline int
dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

#endif

/* Now for the API extensions over the pci_ one */

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#define dma_is_consistent(d)	(1)

static inline int
dma_get_cache_alignment(void)
{
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << L1_CACHE_SHIFT_MAX);
}
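
/*
 * Illustrative sketch, not part of this header: because the value above is
 * a worst-case upper bound, callers typically use it to pad or align DMA
 * buffers so they never share a cache line with unrelated data.
 * "obj_size" is hypothetical.
 *
 *	size_t padded = ALIGN(obj_size, dma_get_cache_alignment());
 */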

static inline void
dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
			      unsigned long offset, size_t size,
			      enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_cpu(dev, dma_handle, offset+size, direction);
}

static inline void
dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
				 unsigned long offset, size_t size,
				 enum dma_data_direction direction)
{
	/* just sync everything, that's all the pci API can do */
	dma_sync_single_for_device(dev, dma_handle, offset+size, direction);
}
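
/*
 * Illustrative sketch, not part of this header: the range variants let a
 * caller name just the bytes it touched, even though this generic fallback
 * simply syncs from the start of the mapping through the named range.
 * "handle" and "hdr_len" are hypothetical.
 *
 *	dma_sync_single_range_for_cpu(dev, handle, 0, hdr_len, DMA_FROM_DEVICE);
 *	...inspect the packet header...
 *	dma_sync_single_range_for_device(dev, handle, 0, hdr_len, DMA_FROM_DEVICE);
 */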

static inline void
dma_cache_sync(void *vaddr, size_t size,
	       enum dma_data_direction direction)
{
	/* could define this in terms of the dma_cache ... operations,
	 * but if you get this on a platform, you should convert the platform
	 * to using the generic device DMA API */
	BUG();
}

#endif