/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc:
 * the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
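
/*
 * Illustrative sketch only (hypothetical driver-side code, "handle",
 * "buf" and "len" are not part of this header): a non-coherent platform
 * can either use the dedicated helpers above or allocate normally and
 * sync by hand, as the comment above describes.
 *
 *	void *buf = __dma_alloc_coherent(PAGE_SIZE, &handle, GFP_KERNEL);
 *	...
 *	__dma_free_coherent(PAGE_SIZE, buf);
 *
 * or, for a normally allocated buffer streamed to the device:
 *
 *	__dma_sync(buf, len, DMA_TO_DEVICE);
 */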

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64

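/* Return the DMA addressing limit (mask) to use for this device. */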
static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without mask can take 32 bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
};
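
/*
 * A bus or iommu back end provides one of these tables and installs it
 * with set_dma_ops() below.  Illustrative sketch only (the "my_bus_*"
 * names are hypothetical; the real tables are defined elsewhere):
 *
 *	static struct dma_mapping_ops my_bus_dma_ops = {
 *		.alloc_coherent	= my_bus_alloc_coherent,
 *		.free_coherent	= my_bus_free_coherent,
 *		.map_single	= my_bus_map_single,
 *		.unmap_single	= my_bus_unmap_single,
 *		.map_sg		= my_bus_map_sg,
 *		.unmap_sg	= my_bus_unmap_sg,
 *	};
 */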

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out of line call but it is not needed for now. The
	 * only ISA DMA device we support is the floppy and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */
	if (unlikely(dev == NULL || dev->archdata.dma_ops == NULL))
		return NULL;
	return dev->archdata.dma_ops;
}

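/* Install the bus-specific DMA operations for a device. */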
static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

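/*
 * A mask is supported if the device's ops either provide no check
 * (assumed OK) or explicitly accept it; with no ops at all we must
 * report failure.
 */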
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

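/*
 * The *_attrs variants pass an optional struct dma_attrs through to the
 * bus operations; the plain dma_map_*()/dma_unmap_*() wrappers further
 * down simply call these with attrs == NULL.
 */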
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, cpu_addr, size, direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					   dma_addr_t dma_addr,
					   size_t size,
					   enum dma_data_direction direction,
					   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					     struct page *page,
					     unsigned long offset, size_t size,
					     enum dma_data_direction direction,
					     struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_single(dev, page_address(page) + offset, size,
			direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					 dma_addr_t dma_address,
					 size_t size,
					 enum dma_data_direction direction,
					 struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_single(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
					dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}
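
/*
 * Typical streaming-DMA usage from a driver (illustrative sketch only;
 * "dev", "buf" and "len" are hypothetical):
 *
 *	dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(handle))
 *		return -EIO;
 *	... hand "handle" to the hardware, wait for completion ...
 *	dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */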

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

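/*
 * On 32-bit, coherent allocations either come from the uncached-mapping
 * helpers (non-snooping CPUs) or are plain page allocations on
 * cache-coherent parts.
 */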
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle,
				       gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	/* We do nothing. */
}

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	/* We do nothing. */
}

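/*
 * Scatterlist mapping on 32-bit is the identity bus mapping: sync each
 * page for the device and record its bus address in the entry.
 */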
static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
	   enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i) {
		BUG_ON(!sg_page(sg));
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg_page(sg)) + sg->offset;
	}

	return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	/* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct scatterlist *sg;
	int i;

	BUG_ON(direction == DMA_NONE);

	for_each_sg(sgl, sg, nents, i)
		__dma_sync_page(sg_page(sg), sg->offset, sg->length, direction);
}

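/*
 * Only the 64-bit iommu/direct paths can report a failed mapping; the
 * 32-bit identity bus mapping above cannot fail.
 */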
static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */