/*
 * Copyright (C) 2004 IBM
 *
 * Implements the generic device DMA API for powerpc,
 * covering the pci and vio busses.
 */
#ifndef _ASM_DMA_MAPPING_H
#define _ASM_DMA_MAPPING_H

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        dma_ops->unmap_single(dev, dma_address, size, direction);
}

static inline int dma_map_sg(struct device *dev, struct scatterlist *sg,
                             int nents, enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        return dma_ops->map_sg(dev, sg, nents, direction);
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nhwentries,
                                enum dma_data_direction direction)
{
        struct dma_mapping_ops *dma_ops = get_dma_ops(dev);

        BUG_ON(!dma_ops);
        dma_ops->unmap_sg(dev, sg, nhwentries, direction);
}
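
/*
 * Illustrative sketch, not part of this header: how a driver would go
 * through the wrappers above to map a scatterlist for a transfer and
 * release it afterwards.  "dev", "sgl" and "nents" stand for hypothetical,
 * already-initialised driver state, and error handling is kept minimal.
 *
 *      int mapped = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 *      if (mapped == 0)
 *              return -EIO;    // nothing could be mapped
 *
 *      // ... program the device using sg_dma_address()/sg_dma_len() ...
 *
 *      dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 */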

/*
 * Available generic sets of operations
 */
extern struct dma_mapping_ops dma_iommu_ops;
extern struct dma_mapping_ops dma_direct_ops;

extern unsigned long dma_direct_offset;
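
/*
 * Illustrative note, not part of this header: platform or bus setup code
 * is expected to attach one of the operation sets above to a device so
 * that get_dma_ops() can dispatch through it.  Assuming a set_dma_ops()
 * style helper exists in this tree (treated as hypothetical here), hooking
 * up a directly mapped PCI device would look roughly like:
 *
 *      set_dma_ops(&pdev->dev, &dma_direct_ops);
 */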

#else /* CONFIG_PPC64 */

#define dma_supported(dev, mask) (1)

static inline int dma_set_mask(struct device *dev, u64 dma_mask)
{
        if (!dev->dma_mask || !dma_supported(dev, dma_mask))
                return -EIO;

        *dev->dma_mask = dma_mask;

        return 0;
}

static inline void *dma_alloc_coherent(struct device *dev, size_t size,
                                       dma_addr_t *dma_handle, gfp_t gfp)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        return __dma_alloc_coherent(size, dma_handle, gfp);
#else
        void *ret;

        /* ignore region specifiers */
        gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

        if (dev == NULL || dev->coherent_dma_mask < 0xffffffff)
                gfp |= GFP_DMA;

        ret = (void *)__get_free_pages(gfp, get_order(size));

        if (ret != NULL) {
                memset(ret, 0, size);
                *dma_handle = virt_to_bus(ret);
        }

        return ret;
#endif
}

static inline void
dma_free_coherent(struct device *dev, size_t size, void *vaddr,
                  dma_addr_t dma_handle)
{
#ifdef CONFIG_NOT_COHERENT_CACHE
        __dma_free_coherent(size, vaddr);
#else
        free_pages((unsigned long)vaddr, get_order(size));
#endif
}
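
/*
 * Illustrative sketch, not part of this header: a driver allocating a
 * small consistent buffer, e.g. for a descriptor ring.  "dev", "ring" and
 * "ring_dma" are hypothetical driver state; only the call pattern matters.
 *
 *      void *ring;
 *      dma_addr_t ring_dma;
 *
 *      ring = dma_alloc_coherent(dev, PAGE_SIZE, &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      // ... give ring_dma to the hardware, access "ring" from the CPU ...
 *      dma_free_coherent(dev, PAGE_SIZE, ring, ring_dma);
 */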

static inline dma_addr_t
dma_map_single(struct device *dev, void *ptr, size_t size,
               enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync(ptr, size, direction);

        return virt_to_bus(ptr);
}

static inline void dma_unmap_single(struct device *dev, dma_addr_t dma_addr,
                                    size_t size,
                                    enum dma_data_direction direction)
{
        /* We do nothing. */
}
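
/*
 * Illustrative sketch, not part of this header: streaming DMA on a
 * kmalloc()ed buffer.  On this non-IOMMU side the mapping is just a cache
 * sync plus virt_to_bus(), but drivers should still use the map/unmap pair
 * so the same code works on every platform.  "dev", "buf" and "len" are
 * hypothetical driver state.
 *
 *      dma_addr_t handle;
 *
 *      handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *      // ... tell the device to read "len" bytes from "handle" ...
 *      dma_unmap_single(dev, handle, len, DMA_TO_DEVICE);
 */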

static inline dma_addr_t
dma_map_page(struct device *dev, struct page *page,
             unsigned long offset, size_t size,
             enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);

        __dma_sync_page(page, offset, size, direction);

        return page_to_bus(page) + offset;
}

static inline void dma_unmap_page(struct device *dev, dma_addr_t dma_address,
                                  size_t size,
                                  enum dma_data_direction direction)
{
        /* We do nothing. */
}

static inline int
dma_map_sg(struct device *dev, struct scatterlist *sgl, int nents,
           enum dma_data_direction direction)
{
        struct scatterlist *sg;
        int i;

        BUG_ON(direction == DMA_NONE);

        for_each_sg(sgl, sg, nents, i) {
                BUG_ON(!sg->page);
                __dma_sync_page(sg->page, sg->offset, sg->length, direction);
                sg->dma_address = page_to_bus(sg->page) + sg->offset;
        }

        return nents;
}

static inline void dma_unmap_sg(struct device *dev, struct scatterlist *sg,
                                int nhwentries,
                                enum dma_data_direction direction)
{
        /* We don't do anything here. */
}

#endif /* CONFIG_PPC64 */

static inline void dma_sync_single_for_cpu(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}

static inline void dma_sync_single_for_device(struct device *dev,
                dma_addr_t dma_handle, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(bus_to_virt(dma_handle), size, direction);
}
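
/*
 * Illustrative sketch, not part of this header: the ownership handshake
 * for a long-lived streaming mapping that the device writes into.  The
 * CPU may only inspect the data between the ..._for_cpu call and the
 * following ..._for_device call.  "dev", "handle" and "len" are
 * hypothetical driver state.
 *
 *      dma_sync_single_for_cpu(dev, handle, len, DMA_FROM_DEVICE);
 *      // ... CPU reads the freshly DMA'd data ...
 *      dma_sync_single_for_device(dev, handle, len, DMA_FROM_DEVICE);
 *      // ... the device may now DMA into the buffer again ...
 */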
177
178static inline void dma_sync_sg_for_cpu(struct device *dev,
Jens Axboe78bdc312007-10-12 13:44:12 +0200179 struct scatterlist *sgl, int nents,
Stephen Rothwell78b09732005-11-19 01:40:46 +1100180 enum dma_data_direction direction)
181{
Jens Axboe78bdc312007-10-12 13:44:12 +0200182 struct scatterlist *sg;
Stephen Rothwell78b09732005-11-19 01:40:46 +1100183 int i;
184
185 BUG_ON(direction == DMA_NONE);
186
Jens Axboe78bdc312007-10-12 13:44:12 +0200187 for_each_sg(sgl, sg, nents, i)
Stephen Rothwell78b09732005-11-19 01:40:46 +1100188 __dma_sync_page(sg->page, sg->offset, sg->length, direction);
189}
190
191static inline void dma_sync_sg_for_device(struct device *dev,
Jens Axboe78bdc312007-10-12 13:44:12 +0200192 struct scatterlist *sgl, int nents,
Stephen Rothwell78b09732005-11-19 01:40:46 +1100193 enum dma_data_direction direction)
194{
Jens Axboe78bdc312007-10-12 13:44:12 +0200195 struct scatterlist *sg;
Stephen Rothwell78b09732005-11-19 01:40:46 +1100196 int i;
197
198 BUG_ON(direction == DMA_NONE);
199
Jens Axboe78bdc312007-10-12 13:44:12 +0200200 for_each_sg(sgl, sg, nents, i)
Stephen Rothwell78b09732005-11-19 01:40:46 +1100201 __dma_sync_page(sg->page, sg->offset, sg->length, direction);
202}

static inline int dma_mapping_error(dma_addr_t dma_addr)
{
#ifdef CONFIG_PPC64
        return (dma_addr == DMA_ERROR_CODE);
#else
        return 0;
#endif
}
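
/*
 * Illustrative sketch, not part of this header: checking a mapping for
 * failure.  Only the 64-bit IOMMU path can actually fail here, but
 * portable driver code should do the check unconditionally.  "dev", "buf"
 * and "len" are hypothetical driver state.
 *
 *      dma_addr_t handle = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *      if (dma_mapping_error(handle))
 *              return -ENOMEM;
 */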

#define dma_alloc_noncoherent(d, s, h, f) dma_alloc_coherent(d, s, h, f)
#define dma_free_noncoherent(d, s, v, h) dma_free_coherent(d, s, v, h)
#ifdef CONFIG_NOT_COHERENT_CACHE
#define dma_is_consistent(d, h) (0)
#else
#define dma_is_consistent(d, h) (1)
#endif

static inline int dma_get_cache_alignment(void)
{
#ifdef CONFIG_PPC64
        /*
         * No easy way to get the cache size on all processors,
         * so return the maximum possible to be safe.
         */
        return (1 << INTERNODE_CACHE_SHIFT);
#else
        /*
         * Each processor family will define its own L1_CACHE_SHIFT,
         * L1_CACHE_BYTES wraps to this, so this is always safe.
         */
        return L1_CACHE_BYTES;
#endif
}
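
/*
 * Illustrative sketch, not part of this header: using the reported
 * alignment to pad a buffer so it never shares a cache line with
 * unrelated data, which matters on the non-cache-coherent parts.
 * "len" is a hypothetical length.
 *
 *      size_t padded = ALIGN(len, dma_get_cache_alignment());
 *      void *buf = kmalloc(padded, GFP_KERNEL);
 */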

static inline void dma_sync_single_range_for_cpu(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        /* just sync everything for now */
        dma_sync_single_for_cpu(dev, dma_handle, offset + size, direction);
}

static inline void dma_sync_single_range_for_device(struct device *dev,
                dma_addr_t dma_handle, unsigned long offset, size_t size,
                enum dma_data_direction direction)
{
        /* just sync everything for now */
        dma_sync_single_for_device(dev, dma_handle, offset + size, direction);
}

static inline void dma_cache_sync(struct device *dev, void *vaddr, size_t size,
                enum dma_data_direction direction)
{
        BUG_ON(direction == DMA_NONE);
        __dma_sync(vaddr, size, (int)direction);
}

#endif /* __KERNEL__ */
#endif /* _ASM_DMA_MAPPING_H */