/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * covering both the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/dma-attrs.h>
#include <asm/io.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
				 size_t size, int direction);

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(gfp, size, handle)	NULL
#define __dma_free_coherent(size, addr)		((void)0)
#define __dma_sync(addr, size, rw)		((void)0)
#define __dma_sync_page(pg, off, sz, rw)	((void)0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

static inline unsigned long device_to_mask(struct device *dev)
{
	if (dev->dma_mask && *dev->dma_mask)
		return *dev->dma_mask;
	/* Assume devices without a mask can take 32-bit addresses */
	return 0xfffffffful;
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction,
				struct dma_attrs *attrs);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*set_dma_mask)(struct device *dev, u64 dma_mask);
	dma_addr_t	(*map_page)(struct device *dev, struct page *page,
				unsigned long offset, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
	void		(*unmap_page)(struct device *dev,
				dma_addr_t dma_address, size_t size,
				enum dma_data_direction direction,
				struct dma_attrs *attrs);
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
	void		(*sync_single_range_for_cpu)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_single_range_for_device)(struct device *hwdev,
				dma_addr_t dma_handle, unsigned long offset,
				size_t size,
				enum dma_data_direction direction);
	void		(*sync_sg_for_cpu)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
	void		(*sync_sg_for_device)(struct device *hwdev,
				struct scatterlist *sg, int nelems,
				enum dma_data_direction direction);
#endif
};
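
/*
 * Illustrative sketch only (not part of this header): a bus or platform
 * implementing this interface typically fills in a static instance of the
 * ops table, e.g.
 *
 *	static struct dma_mapping_ops mybus_dma_ops = {
 *		.alloc_coherent	= mybus_alloc_coherent,
 *		.free_coherent	= mybus_free_coherent,
 *		.map_sg		= mybus_map_sg,
 *		.unmap_sg	= mybus_unmap_sg,
 *		.map_page	= mybus_map_page,
 *		.unmap_page	= mybus_unmap_page,
 *	};
 *
 * The "mybus_*" names are hypothetical; the generic tables actually
 * provided by this architecture are declared below.
 */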

/*
 * Available generic sets of operations
 */
#ifdef CONFIG_PPC64
extern struct dma_mapping_ops dma_iommu_ops;
#endif
extern struct dma_mapping_ops dma_direct_ops;

static inline struct dma_mapping_ops *get_dma_ops(struct device *dev)
{
	/* We don't handle the NULL dev case for ISA for now. We could
	 * do it via an out-of-line call, but it is not needed yet: the
	 * only ISA DMA device we support is the floppy, and we have a hack
	 * in the floppy driver directly to get a device for us.
	 */

	if (unlikely(dev == NULL) || dev->archdata.dma_ops == NULL) {
#ifdef CONFIG_PPC64
		return NULL;
#else
		/* Use default on 32-bit if dma_ops is not set up */
		/* TODO: Long term, we should fix drivers so that dev and
		 * archdata dma_ops are set up for all buses.
		 */
		return &dma_direct_ops;
#endif
	}

	return dev->archdata.dma_ops;
}

static inline void set_dma_ops(struct device *dev, struct dma_mapping_ops *ops)
{
	dev->archdata.dma_ops = ops;
}

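/*
 * Usage sketch (assumption, not mandated by this header): platform or bus
 * setup code installs the chosen ops table before drivers start mapping, e.g.
 *
 *	set_dma_ops(&mydev->dev, &dma_direct_ops);
 *
 * where "mydev" is a placeholder for the device being set up.
 */
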
static inline int dma_supported(struct device *dev, u64 mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return 0;
	if (dma_ops->dma_supported == NULL)
		return 1;
	return dma_ops->dma_supported(dev, mask);
}

/* We have our own implementation of pci_set_dma_mask() */
#define HAVE_ARCH_PCI_SET_DMA_MASK

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	if (unlikely(dma_ops == NULL))
		return -EIO;
	if (dma_ops->set_dma_mask != NULL)
		return dma_ops->set_dma_mask(dev, dma_mask);
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;
	*dev->dma_mask = dma_mask;
	return 0;
}

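/*
 * Driver-side sketch (illustrative only): a device limited to 32-bit
 * addressing would ask for that limit during probe, e.g.
 *
 *	if (dma_set_mask(&pdev->dev, 0xffffffffULL))
 *		return -EIO;
 *
 * "pdev" is a placeholder name; a non-zero return means the bus cannot
 * satisfy the requested mask.
 */
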
/*
 * map_/unmap_single actually call through to map/unmap_page now that all the
 * dma_mapping_ops have been converted over.  We just have to get the page and
 * offset to pass through to map_page.
 */
static inline dma_addr_t dma_map_single_attrs(struct device *dev,
					      void *cpu_addr,
					      size_t size,
					      enum dma_data_direction direction,
					      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, virt_to_page(cpu_addr),
				 (unsigned long)cpu_addr % PAGE_SIZE, size,
				 direction, attrs);
}

static inline void dma_unmap_single_attrs(struct device *dev,
					  dma_addr_t dma_addr,
					  size_t size,
					  enum dma_data_direction direction,
					  struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_addr, size, direction, attrs);
}

static inline dma_addr_t dma_map_page_attrs(struct device *dev,
					    struct page *page,
					    unsigned long offset, size_t size,
					    enum dma_data_direction direction,
					    struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	return dma_ops->map_page(dev, page, offset, size, direction, attrs);
}

static inline void dma_unmap_page_attrs(struct device *dev,
					dma_addr_t dma_address,
					size_t size,
					enum dma_data_direction direction,
					struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);

	dma_ops->unmap_page(dev, dma_address, size, direction, attrs);
}

static inline int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
				   int nents, enum dma_data_direction direction,
				   struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->map_sg(dev, sg, nents, direction, attrs);
}

static inline void dma_unmap_sg_attrs(struct device *dev,
				      struct scatterlist *sg,
				      int nhwentries,
				      enum dma_data_direction direction,
				      struct dma_attrs *attrs)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->unmap_sg(dev, sg, nhwentries, direction, attrs);
}

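/*
 * Scatter/gather sketch (illustrative only): the ops may coalesce entries,
 * so callers must use the count returned by the map call, e.g.
 *
 *	int mapped = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (mapped == 0)
 *		return -ENOMEM;
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 * Note that the unmap takes the original nents, not the mapped count.
 * "sglist" and "nents" are placeholder names.
 */
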
static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t flag)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	return dma_ops->alloc_coherent(dev, size, dma_handle, flag);
}

static inline void dma_free_coherent(struct device *dev, size_t size,
				     void *cpu_addr, dma_addr_t dma_handle)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->free_coherent(dev, size, cpu_addr, dma_handle);
}

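/*
 * Coherent allocation sketch (illustrative only): typically used for
 * long-lived, device-visible structures such as descriptor rings, e.g.
 *
 *	ring = dma_alloc_coherent(dev, ring_bytes, &ring_dma, GFP_KERNEL);
 *	...
 *	dma_free_coherent(dev, ring_bytes, ring, ring_dma);
 *
 * "ring", "ring_bytes" and "ring_dma" are placeholder names.
 */
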
static inline dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
					size_t size,
					enum dma_data_direction direction)
{
	return dma_map_single_attrs(dev, cpu_addr, size, direction, NULL);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
				    size_t size,
				    enum dma_data_direction direction)
{
	dma_unmap_single_attrs(dev, dma_addr, size, direction, NULL);
}

static inline dma_addr_t dma_map_page(struct device *dev, struct page *page,
				      unsigned long offset, size_t size,
				      enum dma_data_direction direction)
{
	return dma_map_page_attrs(dev, page, offset, size, direction, NULL);
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
				  size_t size,
				  enum dma_data_direction direction)
{
	dma_unmap_page_attrs(dev, dma_address, size, direction, NULL);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
			     int nents, enum dma_data_direction direction)
{
	return dma_map_sg_attrs(dev, sg, nents, direction, NULL);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
				int nhwentries,
				enum dma_data_direction direction)
{
	dma_unmap_sg_attrs(dev, sg, nhwentries, direction, NULL);
}

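/*
 * Streaming mapping sketch (illustrative only): map a buffer, let the
 * device perform DMA, then sync for the CPU and unmap, e.g.
 *
 *	busaddr = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *	... device writes into the buffer ...
 *	dma_sync_single_for_cpu(dev, busaddr, len, DMA_FROM_DEVICE);
 *	... CPU reads the data ...
 *	dma_unmap_single(dev, busaddr, len, DMA_FROM_DEVICE);
 *
 * "buf", "len" and "busaddr" are placeholder names.
 */
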
#ifdef CONFIG_PPC_NEED_DMA_SYNC_OPS
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle, 0,
					   size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle,
					      0, size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_cpu(dev, sgl, nents, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_sg_for_device(dev, sgl, nents, direction);
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_cpu(dev, dma_handle,
					   offset, size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

	BUG_ON(!dma_ops);
	dma_ops->sync_single_range_for_device(dev, dma_handle, offset,
					      size, direction);
}
#else /* CONFIG_PPC_NEED_DMA_SYNC_OPS */
static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sgl, int nents,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
}
#endif /* CONFIG_PPC_NEED_DMA_SYNC_OPS */

static inline int dma_mapping_error(struct device *dev, dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

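/*
 * Error-check sketch (illustrative only): streaming mappings should be
 * verified before use, e.g.
 *
 *	busaddr = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(dev, busaddr))
 *		return -ENOMEM;
 *
 * "buf", "len" and "busaddr" are placeholder names.
 */
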
#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h)	(0)
#else
#define dma_is_consistent(d, h)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << INTERNODE_CACHE_SHIFT);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */