/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END	(0xffe00000)
#define CONSISTENT_BASE	(CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}
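
/*
 * Example (illustrative, not taken from this file): a device that can
 * only address 32 bits of bus space would normally declare that in its
 * probe routine before allocating, so that the checks above pass:
 *
 *	dev->coherent_dma_mask = DMA_BIT_MASK(32);
 */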

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
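
/*
 * Worked example for the trimming loop above (assuming 4KiB pages): a
 * 12KiB request rounds up to an order-2 (16KiB, four page) allocation.
 * split_page() turns that into four independent pages, and the loop
 * frees the fourth, leaving exactly the three pages that were asked for.
 */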

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);
		pmd = pmd_alloc(&init_mm, pgd, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
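
/*
 * Sizing example (assuming the default CONSISTENT_DMA_SIZE of SZ_2M):
 * each consistent_pte[] entry covers one pgdir slot, i.e.
 * 1 << PGDIR_SHIFT = 2MiB, so the default configuration needs a single
 * pte table for 0xffc00000-0xffe00000; a platform that defines a 16MiB
 * consistent region would get eight.
 */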

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	struct arm_vmregion *c;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		goto no_page;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		/*
		 * Set the "dma handle"
		 */
		*handle = page_to_dma(dev, page);

		do {
			BUG_ON(!pte_none(*pte));

			/*
			 * x86 does not mark the pages reserved...
			 */
			SetPageReserved(page);
			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		return (void *)c->vm_start;
	}

	if (page)
		__dma_free_buffer(page, size);
 no_page:
	*handle = ~0;
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
		unsigned long pfn;

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (!pte_none(pte) && pte_present(pte)) {
			pfn = pte_pfn(pte);

			if (pfn_valid(pfn)) {
				struct page *page = pfn_to_page(pfn);

				/*
				 * x86 does not mark the pages reserved...
				 */
				ClearPageReserved(page);
				continue;
			}
		}
		printk(KERN_CRIT "%s: bad page in kernel page table\n",
		       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	*handle = page_to_dma(dev, page);
	return page_address(page);
}

#define __dma_free_remap(addr, size)	do { } while (0)

#endif	/* CONFIG_MMU */

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	if (arch_is_coherent()) {
		struct page *page;

		page = __dma_alloc_buffer(dev, PAGE_ALIGN(size), gfp);
		if (!page) {
			*handle = ~0;
			return NULL;
		}

		*handle = page_to_dma(dev, page);
		return page_address(page);
	}

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
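
/*
 * Typical use (illustrative sketch; "mydev" and the ring size are made
 * up): allocate a descriptor ring at probe time and release it again at
 * remove time.  The returned CPU pointer and the bus address written to
 * the handle refer to the same uncached buffer:
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&mydev->dev, SZ_4K,
 *					&ring_dma, GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(&mydev->dev, SZ_4K, ring, ring_dma);
 */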

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
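
/*
 * Typical use (illustrative sketch): a framebuffer-style driver can hand
 * a dma_alloc_writecombine() buffer to userspace from its mmap file
 * operation.  The "mydrv_priv" structure holding the addresses is made
 * up for the example:
 *
 *	static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct mydrv_priv *priv = file->private_data;
 *
 *		return dma_mmap_writecombine(priv->dev, vma, priv->cpu_addr,
 *					     priv->dma_addr, priv->size);
 *	}
 */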

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void dma_cache_maint(const void *start, size_t size, int direction)
{
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	inner_op(start, start + size);
	outer_op(__pa(start), __pa(start) + size);
}
EXPORT_SYMBOL(dma_cache_maint);

static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
				       size_t size, int direction)
{
	void *vaddr;
	unsigned long paddr;
	void (*inner_op)(const void *, const void *);
	void (*outer_op)(unsigned long, unsigned long);

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		inner_op = dmac_inv_range;
		outer_op = outer_inv_range;
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		inner_op = dmac_clean_range;
		outer_op = outer_clean_range;
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		inner_op = dmac_flush_range;
		outer_op = outer_flush_range;
		break;
	default:
		BUG();
	}

	if (!PageHighMem(page)) {
		vaddr = page_address(page) + offset;
		inner_op(vaddr, vaddr + size);
	} else {
		vaddr = kmap_high_get(page);
		if (vaddr) {
			vaddr += offset;
			inner_op(vaddr, vaddr + size);
			kunmap_high(page);
		}
	}

	paddr = page_to_phys(page) + offset;
	outer_op(paddr, paddr + size);
}

void dma_cache_maint_page(struct page *page, unsigned long offset,
			  size_t size, int dir)
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		if (PageHighMem(page) && len + offset > PAGE_SIZE) {
			if (offset >= PAGE_SIZE) {
				page += offset / PAGE_SIZE;
				offset %= PAGE_SIZE;
			}
			len = PAGE_SIZE - offset;
		}
		dma_cache_maint_contiguous(page, offset, len, dir);
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
EXPORT_SYMBOL(dma_cache_maint_page);
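
/*
 * Worked example for the loop above (assuming 4KiB pages): a highmem sg
 * entry with offset 3000 and size 8192 is processed as three chunks -
 * 1096 bytes up to the end of the first page, one full 4096 byte page,
 * then the remaining 3000 bytes - so each page can be kmapped
 * individually.
 */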

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
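
/*
 * Typical use (illustrative sketch; the device, table and buffer names
 * are made up).  Note that the unmap uses the original nents, not the
 * count dma_map_sg() returned:
 *
 *	struct scatterlist sgl[2];
 *
 *	sg_init_table(sgl, 2);
 *	sg_set_buf(&sgl[0], buf0, len0);
 *	sg_set_buf(&sgl[1], buf1, len1);
 *
 *	count = dma_map_sg(dev, sgl, 2, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	...program the device from sg_dma_address()/sg_dma_len()...
 *	dma_unmap_sg(dev, sgl, 2, DMA_TO_DEVICE);
 */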

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
				       sg_dma_len(s), dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		if (!arch_is_coherent())
			dma_cache_maint_page(sg_page(s), s->offset,
					     s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
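
/*
 * Typical use (illustrative sketch): a driver that keeps a mapping alive
 * across several transfers brackets each CPU access with the two sync
 * calls, handing ownership back to the device afterwards:
 *
 *	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
 *	...CPU reads the received data through the sg pages...
 *	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
 */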