/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * covering the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
};

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
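
/*
 * Illustrative sketch only: a platform or bus might populate a
 * dma_mapping_ops instance and attach it to a device as below.  The
 * "mybus_*" callbacks are hypothetical, not defined by this header.
 * dma_supported and set_dma_mask may be left NULL; the wrappers below
 * fall back to sane defaults in that case.
 *
 *	static struct dma_mapping_ops mybus_dma_ops = {
 *		.alloc_coherent	= mybus_alloc_coherent,
 *		.free_coherent	= mybus_free_coherent,
 *		.map_single	= mybus_map_single,
 *		.unmap_single	= mybus_unmap_single,
 *		.map_sg		= mybus_map_sg,
 *		.unmap_sg	= mybus_unmap_sg,
 *	};
 *
 *	set_dma_ops(dev, &mybus_dma_ops);
 */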

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}
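
/*
 * Typical driver usage (a sketch, not mandated by this header): try a
 * 64-bit mask first and fall back to 32 bits.  DMA_64BIT_MASK and
 * DMA_32BIT_MASK come from <linux/dma-mapping.h>.
 *
 *	if (dma_set_mask(dev, DMA_64BIT_MASK) &&
 *	    dma_set_mask(dev, DMA_32BIT_MASK))
 *		return -EIO;
 */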

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction);
}
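
/*
 * Streaming-mapping sketch (illustrative; "buf" and "len" are
 * hypothetical driver state): map a buffer for a single transfer to
 * the device, then unmap once the transfer has completed.
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	... program the device with busaddr, wait for completion ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */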

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
				   direction);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
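
/*
 * Scatter/gather sketch (illustrative; "sgtab" and "nents" are
 * hypothetical): the number of entries actually mapped may be smaller
 * than nents if an IOMMU coalesced segments, but unmap is always
 * called with the original nents.
 *
 *	int mapped;
 *
 *	mapped = dma_map_sg(dev, sgtab, nents, DMA_FROM_DEVICE);
 *	... hand the mapped segments to the device ...
 *	dma_unmap_sg(dev, sgtab, nents, DMA_FROM_DEVICE);
 */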

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
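
/*
 * Sync sketch (illustrative; "busaddr" and "len" are hypothetical): a
 * driver that inspects a DMA_FROM_DEVICE buffer while the mapping is
 * live must bracket the CPU access with syncs.
 *
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... CPU reads the buffer ...
 *	dma_sync_single_for_device(dev, busaddr, len, DMA_FROM_DEVICE);
 */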

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
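
/*
 * Error-check sketch (illustrative): on CONFIG_PPC64 a streaming map
 * can fail (e.g. IOMMU space exhaustion), so the returned handle
 * should be tested before use.
 *
 *	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(busaddr))
 *		return -ENOMEM;
 */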

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything from the start of the mapping
	 * through offset + size, for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything from the start of the mapping
	 * through offset + size, for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */