/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}
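
/*
 * Usage sketch (hypothetical driver code, not part of this file): the mask
 * consulted above is normally set up by the bus code or by the driver's
 * probe routine before any coherent allocation is attempted, e.g.
 *
 *	pdev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
 *
 * A device left with a zero mask makes get_coherent_dma_mask() warn, and
 * every coherent allocation for that device will fail.
 */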

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
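
/*
 * Worked example of the trimming above (illustrative numbers, not from
 * this file): a 12KiB request on a system with 4KiB pages gives
 * get_order() == 2, so alloc_pages() returns a 16KiB (four page) block.
 * split_page() turns it into four independent order-0 pages and the loop
 * frees the fourth, leaving exactly the three pages that back the buffer.
 */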

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
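
/*
 * Usage sketch (hypothetical driver code): a typical allocation and
 * release of a coherent descriptor ring looks like
 *
 *	dma_addr_t ring_dma;
 *	void *ring;
 *
 *	ring = dma_alloc_coherent(&pdev->dev, RING_SIZE, &ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	writel(ring_dma, base + RING_BASE_REG);	/- give the device the bus address
 *	...
 *	dma_free_coherent(&pdev->dev, RING_SIZE, ring, ring_dma);
 *
 * RING_SIZE, RING_BASE_REG and "base" are invented names: the CPU uses the
 * returned virtual pointer while the device uses the dma_addr_t handle.
 */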

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
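
/*
 * Usage sketch (hypothetical driver code): these helpers are meant to be
 * called from a driver's file_operations ->mmap handler to hand a coherent
 * buffer to userspace, e.g.
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 *
 * foo_dev and its fields are invented for the example; the buffer itself
 * would have come from dma_alloc_coherent() earlier.
 */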

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);
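
/*
 * Usage sketch (hypothetical driver code): as the comment above says,
 * drivers reach these through the generic dma_sync_* helpers rather than
 * calling them directly, e.g. to look at a streaming mapping between
 * transfers:
 *
 *	dma_sync_single_for_cpu(dev, buf_dma, len, DMA_FROM_DEVICE);
 *	process_data(buf);		/- the CPU may now read the buffer
 *	dma_sync_single_for_device(dev, buf_dma, len, DMA_FROM_DEVICE);
 *
 * buf, buf_dma, len and process_data() are invented names for the example.
 */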
Russell Kingafd1a322008-09-25 16:30:57 +0100438
Russell King65af1912009-11-24 17:53:33 +0000439static void dma_cache_maint_page(struct page *page, unsigned long offset,
Russell Kinga9c91472009-11-26 16:19:58 +0000440 size_t size, enum dma_data_direction dir,
441 void (*op)(const void *, size_t, int))
Russell King65af1912009-11-24 17:53:33 +0000442{
443 /*
444 * A single sg entry may refer to multiple physically contiguous
445 * pages. But we still need to process highmem pages individually.
446 * If highmem is not configured then the bulk of this loop gets
447 * optimized out.
448 */
449 size_t left = size;
450 do {
451 size_t len = left;
Russell King93f1d622009-11-24 14:41:01 +0000452 void *vaddr;
453
454 if (PageHighMem(page)) {
455 if (len + offset > PAGE_SIZE) {
456 if (offset >= PAGE_SIZE) {
457 page += offset / PAGE_SIZE;
458 offset %= PAGE_SIZE;
459 }
460 len = PAGE_SIZE - offset;
Russell King65af1912009-11-24 17:53:33 +0000461 }
Russell King93f1d622009-11-24 14:41:01 +0000462 vaddr = kmap_high_get(page);
463 if (vaddr) {
464 vaddr += offset;
Russell Kinga9c91472009-11-26 16:19:58 +0000465 op(vaddr, len, dir);
Russell King93f1d622009-11-24 14:41:01 +0000466 kunmap_high(page);
467 }
468 } else {
469 vaddr = page_address(page) + offset;
Russell Kinga9c91472009-11-26 16:19:58 +0000470 op(vaddr, len, dir);
Russell King65af1912009-11-24 17:53:33 +0000471 }
Russell King65af1912009-11-24 17:53:33 +0000472 offset = 0;
473 page++;
474 left -= len;
475 } while (left);
476}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
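
/*
 * Usage sketch (hypothetical driver code): mapping a scatterlist for a
 * device-bound transfer and handing each segment to the hardware:
 *
 *	int i, count;
 *	struct scatterlist *s;
 *
 *	count = dma_map_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sglist, s, count, i)
 *		foo_hw_queue_segment(hw, sg_dma_address(s), sg_dma_len(s));
 *	...
 *	dma_unmap_sg(dev, sglist, nents, DMA_TO_DEVICE);
 *
 * foo_hw_queue_segment() and "hw" are invented; note that dma_unmap_sg()
 * takes the original nents, not the count returned by dma_map_sg().
 */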

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					       sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
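
/*
 * Usage sketch (hypothetical driver code): with a long-lived scatterlist
 * mapping, ownership is bounced between CPU and device around each
 * transfer instead of remapping every time:
 *
 *	dma_sync_sg_for_cpu(dev, sglist, nents, DMA_FROM_DEVICE);
 *	...			/- the CPU may inspect the received data here
 *	dma_sync_sg_for_device(dev, sglist, nents, DMA_FROM_DEVICE);
 *	...			/- the device owns the buffers again
 *
 * As with dma_unmap_sg(), the nents passed here is the original count
 * given to dma_map_sg().
 */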