/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device dma API for powerpc,
 * covering the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
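
/*
 * A minimal illustrative sketch (not part of this header) of the
 * "allocate the space normally and use the cache management functions"
 * alternative mentioned above; buf, data and len are hypothetical
 * driver-owned values:
 *
 *	void *buf = kmalloc(len, GFP_KERNEL);
 *
 *	memcpy(buf, data, len);
 *	__dma_sync(buf, len, DMA_TO_DEVICE);	flush before the device reads
 */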

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64
/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
};

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}
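
/*
 * Illustrative sketch only: a bus or platform layer can supply its own
 * dma_mapping_ops and attach them with set_dma_ops() before drivers map
 * anything.  The example_* names below are hypothetical, not real kernel
 * symbols.  The dma_supported and set_dma_mask hooks may be left NULL,
 * in which case the wrappers below fall back to generic behaviour:
 *
 *	static struct dma_mapping_ops example_ops = {
 *		.alloc_coherent	= example_alloc_coherent,
 *		.free_coherent	= example_free_coherent,
 *		.map_single	= example_map_single,
 *		.unmap_single	= example_unmap_single,
 *		.map_sg		= example_map_sg,
 *		.unmap_sg	= example_unmap_sg,
 *	};
 *
 *	static void example_setup_dma(struct device *dev)
 *	{
 *		set_dma_ops(dev, &example_ops);
 *	}
 */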

static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					       void *cpu_addr,
					       size_t size,
					       enum dma_data_direction direction,
					       struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
			direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;
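
/*
 * Illustrative sketch only: platform setup code that wants the generic
 * 1:1 "direct" ops for a device (pdev is a hypothetical struct pci_dev)
 * could install them with:
 *
 *	set_dma_ops(&pdev->dev, &dma_direct_ops);
 */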

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle,
					gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}
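
/*
 * Illustrative sketch only: with a long-lived streaming mapping, buffer
 * ownership is handed back and forth with the sync calls.  dev, handle,
 * buf, len and process() are hypothetical driver-owned names:
 *
 *	dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *	process(buf, len);		the CPU may look at the data now
 *	dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *					the device owns the buffer again
 */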

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}
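
/*
 * Illustrative sketch only of the usual driver-side pattern built on the
 * helpers above; dev, buf and len are hypothetical driver-owned values:
 *
 *	dma_addr_t handle;
 *
 *	handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -ENOMEM;
 *	... start the device on handle and wait for it to finish ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */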

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */