/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END		(0xffe00000)
#define CONSISTENT_BASE		(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES	(CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)
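
/*
 * Worked example (illustrative; CONSISTENT_DMA_SIZE is assumed to be the
 * common default of SZ_2M, and PGDIR_SHIFT to be 21 as on classic ARM):
 * CONSISTENT_BASE = 0xffe00000 - 0x00200000 = 0xffc00000, and
 * NUM_CONSISTENT_PTES = SZ_2M >> 21 = 1, i.e. a single PTE table covers
 * the entire consistent region.
 */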
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}
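
/*
 * Illustrative example (values assumed, not taken from a real platform):
 * a device restricted to 28-bit addressing has coherent_dma_mask =
 * 0x0fffffff.  If ISA_DMA_THRESHOLD is 0xffffffff, then
 * (~mask) & ISA_DMA_THRESHOLD = 0xf0000000 is non-zero, so no GFP_DMA
 * allocation can satisfy the mask and get_coherent_dma_mask() returns 0.
 */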

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
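
/*
 * Illustrative arithmetic for __dma_alloc_buffer() above (sizes assumed):
 * a 12KiB (three page) request allocates an order-2 (16KiB) block;
 * split_page() turns it into four independent order-0 pages, and the
 * loop frees the fourth, so only the pages backing the buffer stay
 * allocated.
 */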

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%lu != %zu)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);
				continue;
			}
		}
		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
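
/*
 * Hypothetical usage sketch (driver names and error handling assumed,
 * not part of this file):
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *
 *	if (!cpu)
 *		return -ENOMEM;
 *	... program the device with 'dma', access the buffer via 'cpu' ...
 *	dma_free_coherent(dev, SZ_4K, cpu, dma);
 */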

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
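
/*
 * Hypothetical usage sketch (the foo_dev structure and its fields are
 * assumed): a framebuffer-style driver that allocated its buffer with
 * dma_alloc_writecombine() can hand it to userspace from its mmap
 * file operation:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_writecombine(foo->dev, vma, foo->cpu_addr,
 *					     foo->dma_handle, foo->size);
 *	}
 */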

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void dma_cache_maint(const void *start, size_t size, int direction)
{
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	inner_op(start, start + size);
	outer_op(__pa(start), __pa(start) + size);
}
EXPORT_SYMBOL(dma_cache_maint);

static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
				       size_t size, int direction)
{
	void *vaddr;
	unsigned long paddr;
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	if (!PageHighMem(page)) {
		vaddr = page_address(page) + offset;
		inner_op(vaddr, vaddr + size);
	} else {
		vaddr = kmap_high_get(page);
		if (vaddr) {
			vaddr += offset;
			inner_op(vaddr, vaddr + size);
			kunmap_high(page);
		}
	}

	paddr = page_to_phys(page) + offset;
	outer_op(paddr, paddr + size);
}

void dma_cache_maint_page(struct page *page, unsigned long offset,
			  size_t size, int dir)
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
			if (offset >= PAGE_SIZE) {
				page += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
			}
			len = PAGE_SIZE - offset;
		}
		dma_cache_maint_contiguous(page, offset, len, dir);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
EXPORT_SYMBOL(dma_cache_maint_page);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
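
/*
 * Hypothetical usage sketch (scatterlist setup and foo_hw_queue() are
 * assumed): after a successful dma_map_sg(), each element carries the
 * bus address and length the device should use:
 *
 *	struct scatterlist *s;
 *	int i, count;
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (!count)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		foo_hw_queue(hw, sg_dma_address(s), sg_dma_len(s));
 */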

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
				       sg_dma_len(s), dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					       sg_dma_len(s), dir))
			continue;

		if (!arch_is_coherent())
			dma_cache_maint_page(sg_page(s), s->offset,
					     s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
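
/*
 * Hypothetical usage sketch (buffer and mapping setup assumed): a driver
 * letting the CPU inspect a streaming mapping between device transfers
 * brackets the access with the two sync calls:
 *
 *	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
 *	... the CPU may now read the received data ...
 *	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
 */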