/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}
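
/*
 * Worked example (illustrative values, not from this file): a driver
 * that sets coherent_dma_mask = DMA_BIT_MASK(24) on a platform where
 * ISA_DMA_THRESHOLD is 0xffffffff leaves (~mask) & ISA_DMA_THRESHOLD
 * non-zero, i.e. GFP_DMA memory may lie above what the device can
 * address, so the function warns and returns 0 to fail the allocation
 * early.
 */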

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}
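
/*
 * Worked example for the split_page() trick above (illustrative): a
 * 12KiB request has order 2, so alloc_pages() returns 4 contiguous
 * pages.  split_page() converts them into independent order-0 pages,
 * and the loop frees the fourth one, leaving exactly the 3 pages that
 * back the buffer.
 */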

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);

		pud = pud_alloc(&init_mm, pgd, base);
		if (!pud) {
			printk(KERN_ERR "%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pmd = pmd_alloc(&init_mm, pud, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
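
/*
 * Sizing note (illustrative, values assumed rather than taken from this
 * file): each consistent_pte[] entry spans one PGDIR slot of 2MiB, so
 * with the default CONSISTENT_DMA_SIZE of SZ_2M consistent_init()
 * pre-allocates a single PTE table covering CONSISTENT_BASE to
 * CONSISTENT_END; a platform raising it to 14MiB would get seven.
 */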

static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		dsb();

		return (void *)c->vm_start;
	}
	return NULL;
}

static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = page_to_dma(dev, page);

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
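
/*
 * Usage sketch (hypothetical driver code, not part of this file):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(&pdev->dev, SZ_4K, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	... program ring_dma into the device, access 'ring' from the CPU ...
 *	dma_free_coherent(&pdev->dev, SZ_4K, ring, ring_dma);
 *
 * 'pdev' is an assumed platform/PCI device.  No cache maintenance is
 * needed on the returned buffer; that is the point of the coherent API.
 */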

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);
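
/*
 * Usage sketch (hypothetical, not part of this file): exposing a
 * coherent buffer to userspace from a driver's ->mmap file operation:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_coherent(foo->dev, vma, foo->cpu_addr,
 *					 foo->dma_handle, foo->size);
 *	}
 *
 * 'foo_dev' and its fields are assumed names for illustration only.
 */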

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(dma_to_page(dev, handle), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);

static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				pte_t saved_pte;
				vaddr = kmap_high_l1_vipt(page, &saved_pte);
				op(vaddr + offset, len, dir);
				kunmap_high_l1_vipt(page, saved_pte);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}

void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	for_each_sg(sg, s, nents, i) {
		s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);
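
/*
 * Usage sketch (hypothetical, not part of this file):
 *
 *	count = dma_map_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *	if (count == 0)
 *		return -ENOMEM;
 *	for_each_sg(sgl, s, count, i)
 *		... program sg_dma_address(s) / sg_dma_len(s) ...
 *	...
 *	dma_unmap_sg(dev, sgl, nents, DMA_TO_DEVICE);
 *
 * Note that dma_unmap_sg() takes the original nents, not the count
 * returned by dma_map_sg().
 */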

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i)
		dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}
}
EXPORT_SYMBOL(dma_sync_sg_for_device);
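
/*
 * Usage sketch (hypothetical, not part of this file): a driver that
 * inspects received data between transfers must hand buffer ownership
 * back and forth:
 *
 *	dma_sync_sg_for_cpu(dev, sgl, nents, DMA_FROM_DEVICE);
 *	... the CPU may now read the buffers, e.g. to parse headers ...
 *	dma_sync_sg_for_device(dev, sgl, nents, DMA_FROM_DEVICE);
 *	... the device may DMA into the buffers again ...
 */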