/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc:
 * the PCI and VIO busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H
#ifdef __KERNEL__

#include <linux/config.h>
#include <linux/types.h>
#include <linux/cache.h>
/* need struct page definitions */
#include <linux/mm.h>
#include <asm/scatterlist.h>
#include <asm/io.h>
#include <asm/bug.h>

#define DMA_ERROR_CODE		(~(dma_addr_t)0x0)

#ifdef CONFIG_NOT_COHERENT_CACHE
/*
 * DMA-consistent mapping functions for PowerPCs that don't support
 * cache snooping.  These allocate/free a region of uncached mapped
 * memory space for use with DMA devices.  Alternatively, you could
 * allocate the space "normally" and use the cache management functions
 * to ensure it is consistent.
 */
extern void *__dma_alloc_coherent(size_t size, dma_addr_t *handle, gfp_t gfp);
extern void __dma_free_coherent(size_t size, void *vaddr);
extern void __dma_sync(void *vaddr, size_t size, int direction);
extern void __dma_sync_page(struct page *page, unsigned long offset,
			    size_t size, int direction);
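/*
 * Illustrative sketch (not part of this header): on a non-snooping core a
 * driver pairs the uncached allocation with explicit syncs on any buffer
 * it hands to the device.  The buffer names and sizes are hypothetical.
 *
 *	dma_addr_t handle;
 *	void *virt = __dma_alloc_coherent(MY_BUF_SIZE, &handle, GFP_KERNEL);
 *
 *	if (!virt)
 *		return -ENOMEM;
 *	...
 *	__dma_free_coherent(MY_BUF_SIZE, virt);
 *
 * For memory allocated "normally", flush it before the device reads it:
 *
 *	__dma_sync(cpu_buf, cpu_buf_len, DMA_TO_DEVICE);
 */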

#else /* ! CONFIG_NOT_COHERENT_CACHE */
/*
 * Cache coherent cores.
 */

#define __dma_alloc_coherent(size, handle, gfp)	NULL
#define __dma_free_coherent(size, addr)		do { } while (0)
#define __dma_sync(addr, size, rw)		do { } while (0)
#define __dma_sync_page(pg, off, sz, rw)	do { } while (0)

#endif /* ! CONFIG_NOT_COHERENT_CACHE */

#ifdef CONFIG_PPC64

extern int dma_supported(struct device *dev, u64 mask);
extern int dma_set_mask(struct device *dev, u64 dma_mask);
extern void *dma_alloc_coherent(struct device *dev, size_t size,
		dma_addr_t *dma_handle, gfp_t flag);
extern void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle);
extern dma_addr_t dma_map_single(struct device *dev, void *cpu_addr,
		size_t size, enum dma_data_direction direction);
extern void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
		size_t size, enum dma_data_direction direction);
extern dma_addr_t dma_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size,
		enum dma_data_direction direction);
extern void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
		size_t size, enum dma_data_direction direction);
extern int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction direction);
extern void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
		int nhwentries, enum dma_data_direction direction);
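/*
 * Illustrative sketch (not part of this header): typical streaming use of
 * the single-buffer mapping API.  "dev", "skb_data" and "len" stand in for
 * a real driver's device and buffer.
 *
 *	dma_addr_t busaddr;
 *
 *	busaddr = dma_map_single(dev, skb_data, len, DMA_TO_DEVICE);
 *	if (dma_mapping_error(busaddr))
 *		return -EIO;
 *	... point the device at busaddr and start the transfer ...
 *	dma_unmap_single(dev, busaddr, len, DMA_TO_DEVICE);
 */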

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask)	(1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
	if (!dev->dma_mask || !dma_supported(dev, dma_mask))
		return -EIO;

	*dev->dma_mask = dma_mask;

	return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
				       dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	return __dma_alloc_coherent(size, dma_handle, gfp);
#else
	void *ret;

	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
		gfp |= GFP_DMA;

	ret = (void *)__get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_bus(ret);
	}

	return ret;
#endif
}
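/*
 * Illustrative sketch (not part of this header): allocating a small
 * coherent descriptor ring.  "ring" and "RING_BYTES" are hypothetical.
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(dev, RING_BYTES, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... hand ring_dma to the device, use ring from the CPU ...
 *	dma_free_coherent(dev, RING_BYTES, ring, ring_dma);
 */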

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
		  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
	__dma_free_coherent(size, vaddr);
#else
	free_pages((unsigned long)vaddr, get_order(size));
#endif
}

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
	       enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync(ptr, size, direction);

	return virt_to_bus(ptr);
}

/* We do nothing. */
#define dma_unmap_single(dev, addr, size, dir)	do { } while (0)

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
	     unsigned long offset, size_t size,
	     enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	__dma_sync_page(page, offset, size, direction);

	return page_to_bus(page) + offset;
}

/* We do nothing. */
#define dma_unmap_page(dev, handle, size, dir)	do { } while (0)

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	   enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		BUG_ON(!sg->page);
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
		sg->dma_address = page_to_bus(sg->page) + sg->offset;
	}

	return nents;
}
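/*
 * Illustrative sketch (not part of this header): mapping a scatterlist
 * and walking the resulting bus addresses.  "sglist", "count" and
 * "program_hw_slot" are hypothetical driver state and helpers.
 *
 *	int i, n;
 *
 *	n = dma_map_sg(dev, sglist, count, DMA_FROM_DEVICE);
 *	for (i = 0; i < n; i++)
 *		program_hw_slot(i, sglist[i].dma_address, sglist[i].length);
 *	...
 *	dma_unmap_sg(dev, sglist, count, DMA_FROM_DEVICE);
 */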

/* We don't do anything here. */
#define dma_unmap_sg(dev, sg, nents, dir)	do { } while (0)

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
		dma_addr_t dma_handle, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_sg_for_cpu(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline void dma_sync_sg_for_device(struct device *dev,
		struct scatterlist *sg, int nents,
		enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++)
		__dma_sync_page(sg->page, sg->offset, sg->length, direction);
}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
	return (dma_addr == DMA_ERROR_CODE);
#else
	return 0;
#endif
}

#define dma_alloc_noncoherent(d, s, h, f)	dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h)	dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d)	(0)
#else
#define dma_is_consistent(d)	(1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
	/* no easy way to get cache size on all processors, so return
	 * the maximum possible, to be safe */
	return (1 << L1_CACHE_SHIFT_MAX);
#else
	/*
	 * Each processor family will define its own L1_CACHE_SHIFT,
	 * L1_CACHE_BYTES wraps to this, so this is always safe.
	 */
	return L1_CACHE_BYTES;
#endif
}
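/*
 * Illustrative sketch (not part of this header): rounding a buffer length
 * up with the kernel's ALIGN() helper so no unrelated data shares a cache
 * line with the mapped region.
 *
 *	int align = dma_get_cache_alignment();
 *	size_t safe_len = ALIGN(len, align);
 */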

static inline void dma_sync_single_range_for_cpu(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
		dma_addr_t dma_handle, unsigned long offset, size_t size,
		enum dma_data_direction direction)
{
	/* just sync everything for now */
	dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(void *vaddr, size_t size,
		enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);
	__dma_sync(vaddr, size, (int)direction);
}

/*
 * DMA operations are abstracted for G5 vs. i/pSeries, PCI vs. VIO
 */
struct dma_mapping_ops {
	void *		(*alloc_coherent)(struct device *dev, size_t size,
				dma_addr_t *dma_handle, gfp_t flag);
	void		(*free_coherent)(struct device *dev, size_t size,
				void *vaddr, dma_addr_t dma_handle);
	dma_addr_t	(*map_single)(struct device *dev, void *ptr,
				size_t size, enum dma_data_direction direction);
	void		(*unmap_single)(struct device *dev, dma_addr_t dma_addr,
				size_t size, enum dma_data_direction direction);
	int		(*map_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	void		(*unmap_sg)(struct device *dev, struct scatterlist *sg,
				int nents, enum dma_data_direction direction);
	int		(*dma_supported)(struct device *dev, u64 mask);
	int		(*dac_dma_supported)(struct device *dev, u64 mask);
};
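/*
 * Illustrative sketch (not part of this header): a bus provides one
 * instance of these ops and routes each generic DMA call through it.
 * The "vio_*" callback names below are hypothetical stand-ins.
 *
 *	static struct dma_mapping_ops vio_dma_ops = {
 *		.alloc_coherent	= vio_alloc_coherent,
 *		.free_coherent	= vio_free_coherent,
 *		.map_single	= vio_map_single,
 *		.unmap_single	= vio_unmap_single,
 *		.map_sg		= vio_map_sg,
 *		.unmap_sg	= vio_unmap_sg,
 *		.dma_supported	= vio_dma_supported,
 *	};
 */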

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */