/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 2000  Ani Joshi <ajoshi@unixbox.com>
 * Copyright (C) 2000, 2001  Ralf Baechle <ralf@gnu.org>
 * swiped from i386, and cloned for MIPS by Geert, polished by Ralf.
 */
#include <linux/config.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/string.h>
#include <linux/dma-mapping.h>

#include <asm/cache.h>
#include <asm/io.h>

/*
 * Warning on the terminology - Linux calls an uncached area coherent;
 * MIPS terminology calls memory areas with hardware maintained coherency
 * coherent.
 */

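/*
 * Allocate a zeroed buffer from the page allocator and report its
 * physical address as the DMA handle.  The buffer is cached; callers
 * must do their own cache maintenance, e.g. via dma_cache_sync().
 */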
void *dma_alloc_noncoherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;
	/* ignore region specifiers */
	gfp &= ~(__GFP_DMA | __GFP_HIGHMEM);

	if (dev == NULL || (dev->coherent_dma_mask < 0xffffffff))
		gfp |= GFP_DMA;
	ret = (void *) __get_free_pages(gfp, get_order(size));

	if (ret != NULL) {
		memset(ret, 0, size);
		*dma_handle = virt_to_phys(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_noncoherent);

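/*
 * Allocate via dma_alloc_noncoherent(), write the buffer back and
 * invalidate it in the caches, then return its uncached alias so that
 * CPU accesses bypass the cache entirely.
 */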
void *dma_alloc_coherent(struct device *dev, size_t size,
	dma_addr_t * dma_handle, gfp_t gfp)
{
	void *ret;

	ret = dma_alloc_noncoherent(dev, size, dma_handle, gfp);
	if (ret) {
		dma_cache_wback_inv((unsigned long) ret, size);
		ret = UNCAC_ADDR(ret);
	}

	return ret;
}

EXPORT_SYMBOL(dma_alloc_coherent);

void dma_free_noncoherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	free_pages((unsigned long) vaddr, get_order(size));
}

EXPORT_SYMBOL(dma_free_noncoherent);

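/*
 * Convert the uncached alias back to its cached address before handing
 * the pages back to the page allocator.
 */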
void dma_free_coherent(struct device *dev, size_t size, void *vaddr,
	dma_addr_t dma_handle)
{
	unsigned long addr = (unsigned long) vaddr;

	addr = CAC_ADDR(addr);
	free_pages(addr, get_order(size));
}

EXPORT_SYMBOL(dma_free_coherent);

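/*
 * Cache maintenance for a streaming mapping: write back dirty lines
 * when the device will read the buffer, invalidate when the device
 * will write it, and do both for bidirectional transfers.
 */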
static inline void __dma_sync(unsigned long addr, size_t size,
	enum dma_data_direction direction)
{
	switch (direction) {
	case DMA_TO_DEVICE:
		dma_cache_wback(addr, size);
		break;

	case DMA_FROM_DEVICE:
		dma_cache_inv(addr, size);
		break;

	case DMA_BIDIRECTIONAL:
		dma_cache_wback_inv(addr, size);
		break;

	default:
		BUG();
	}
}

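/*
 * Map a single buffer for streaming DMA: perform the cache maintenance
 * required for the transfer direction and hand back the buffer's
 * physical address as the bus address.
 */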
dma_addr_t dma_map_single(struct device *dev, void *ptr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr = (unsigned long) ptr;

	__dma_sync(addr, size, direction);

	return virt_to_phys(ptr);
}

EXPORT_SYMBOL(dma_map_single);

void dma_unmap_single(struct device *dev, dma_addr_t dma_addr, size_t size,
	enum dma_data_direction direction)
{
	unsigned long addr;
	addr = dma_addr + PAGE_OFFSET;

	//__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_unmap_single);

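/*
 * Map a scatterlist for streaming DMA: sync each entry for the given
 * direction and record its physical address in sg->dma_address.
 */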
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	for (i = 0; i < nents; i++, sg++) {
		unsigned long addr;

		addr = (unsigned long) page_address(sg->page);
		if (addr) {
			__dma_sync(addr + sg->offset, sg->length, direction);
			sg->dma_address = (dma_addr_t)page_to_phys(sg->page)
			                  + sg->offset;
		}
	}

	return nents;
}

EXPORT_SYMBOL(dma_map_sg);

dma_addr_t dma_map_page(struct device *dev, struct page *page,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = (unsigned long) page_address(page) + offset;
	dma_cache_wback_inv(addr, size);

	return page_to_phys(page) + offset;
}

EXPORT_SYMBOL(dma_map_page);

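/*
 * If the device may have written to the page, write back and invalidate
 * the corresponding cache lines so the CPU does not read stale data.
 */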
void dma_unmap_page(struct device *dev, dma_addr_t dma_address, size_t size,
	enum dma_data_direction direction)
{
	BUG_ON(direction == DMA_NONE);

	if (direction != DMA_TO_DEVICE) {
		unsigned long addr;

		addr = dma_address + PAGE_OFFSET;
		dma_cache_wback_inv(addr, size);
	}
}

EXPORT_SYMBOL(dma_unmap_page);

void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nhwentries,
	enum dma_data_direction direction)
{
	unsigned long addr;
	int i;

	BUG_ON(direction == DMA_NONE);

	if (direction == DMA_TO_DEVICE)
		return;

	for (i = 0; i < nhwentries; i++, sg++) {
		addr = (unsigned long) page_address(sg->page);
		if (addr)
			__dma_sync(addr + sg->offset, sg->length, direction);
	}
}

EXPORT_SYMBOL(dma_unmap_sg);

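/*
 * The dma_sync_*() helpers below transfer ownership of a streaming
 * mapping between CPU and device.  The DMA handle is a physical
 * address, so the cached kernel virtual address to operate on is
 * simply dma_handle + PAGE_OFFSET.
 */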
void dma_sync_single_for_cpu(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_cpu);

void dma_sync_single_for_device(struct device *dev, dma_addr_t dma_handle,
	size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_for_device);

void dma_sync_single_range_for_cpu(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_cpu);

void dma_sync_single_range_for_device(struct device *dev, dma_addr_t dma_handle,
	unsigned long offset, size_t size, enum dma_data_direction direction)
{
	unsigned long addr;

	BUG_ON(direction == DMA_NONE);

	addr = dma_handle + offset + PAGE_OFFSET;
	__dma_sync(addr, size, direction);
}

EXPORT_SYMBOL(dma_sync_single_range_for_device);

void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_cpu);

void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg, int nelems,
	enum dma_data_direction direction)
{
	int i;

	BUG_ON(direction == DMA_NONE);

	/* Make sure that gcc doesn't leave the empty loop body.  */
	for (i = 0; i < nelems; i++, sg++)
		__dma_sync((unsigned long)page_address(sg->page),
		           sg->length, direction);
}

EXPORT_SYMBOL(dma_sync_sg_for_device);

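/*
 * Physical addresses are used directly as bus addresses here, so a
 * mapping can never fail.
 */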
int dma_mapping_error(dma_addr_t dma_addr)
{
	return 0;
}

EXPORT_SYMBOL(dma_mapping_error);

int dma_supported(struct device *dev, u64 mask)
{
	/*
	 * we fall back to GFP_DMA when the mask isn't all 1s,
	 * so we can't guarantee allocations that must be
	 * within a tighter range than GFP_DMA..
	 */
	if (mask < 0x00ffffff)
		return 0;

	return 1;
}

EXPORT_SYMBOL(dma_supported);

int dma_is_consistent(dma_addr_t dma_addr)
{
	return 1;
}

EXPORT_SYMBOL(dma_is_consistent);

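/*
 * Explicit cache maintenance for buffers obtained from
 * dma_alloc_noncoherent().
 */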
void dma_cache_sync(void *vaddr, size_t size, enum dma_data_direction direction)
{
	if (direction == DMA_NONE)
		return;

	dma_cache_wback_inv((unsigned long)vaddr, size);
}

EXPORT_SYMBOL(dma_cache_sync);

/* The DAC routines are a PCIism.. */

#ifdef CONFIG_PCI

#include <linux/pci.h>

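/*
 * DAC (dual address cycle) addressing allows 64-bit PCI bus addresses;
 * they map 1:1 onto physical addresses here.
 */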
dma64_addr_t pci_dac_page_to_dma(struct pci_dev *pdev,
	struct page *page, unsigned long offset, int direction)
{
	return (dma64_addr_t)page_to_phys(page) + offset;
}

EXPORT_SYMBOL(pci_dac_page_to_dma);

struct page *pci_dac_dma_to_page(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return mem_map + (dma_addr >> PAGE_SHIFT);
}

EXPORT_SYMBOL(pci_dac_dma_to_page);

unsigned long pci_dac_dma_to_offset(struct pci_dev *pdev,
	dma64_addr_t dma_addr)
{
	return dma_addr & ~PAGE_MASK;
}

EXPORT_SYMBOL(pci_dac_dma_to_offset);

void pci_dac_dma_sync_single_for_cpu(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_cpu);

void pci_dac_dma_sync_single_for_device(struct pci_dev *pdev,
	dma64_addr_t dma_addr, size_t len, int direction)
{
	BUG_ON(direction == PCI_DMA_NONE);

	dma_cache_wback_inv(dma_addr + PAGE_OFFSET, len);
}

EXPORT_SYMBOL(pci_dac_dma_sync_single_for_device);

#endif /* CONFIG_PCI */