/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_END          (0xffe00000)
#define CONSISTENT_BASE         (CONSISTENT_END - CONSISTENT_DMA_SIZE)

#define CONSISTENT_OFFSET(x)    (((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES     (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

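/*
 * Worked example of the layout above (illustrative only; assumes the
 * default CONSISTENT_DMA_SIZE of SZ_2M and ARM's 21-bit PGDIR_SHIFT):
 *
 *      CONSISTENT_BASE     = 0xffe00000 - 0x00200000 = 0xffc00000
 *      NUM_CONSISTENT_PTES = SZ_2M >> 21             = 1
 *
 * i.e. a single L2 page table covers the whole consistent region, and
 * CONSISTENT_OFFSET() indexes the pages within it.
 */
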
static u64 get_coherent_dma_mask(struct device *dev)
{
        u64 mask = ISA_DMA_THRESHOLD;

        if (dev) {
                mask = dev->coherent_dma_mask;

                /*
                 * Sanity check the DMA mask - it must be non-zero, and
                 * must be able to be satisfied by a DMA allocation.
                 */
                if (mask == 0) {
                        dev_warn(dev, "coherent DMA mask is unset\n");
                        return 0;
                }

                if ((~mask) & ISA_DMA_THRESHOLD) {
                        dev_warn(dev, "coherent DMA mask %#llx is smaller "
                                 "than system GFP_DMA mask %#llx\n",
                                 mask, (unsigned long long)ISA_DMA_THRESHOLD);
                        return 0;
                }
        }

        return mask;
}

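/*
 * Worked example (illustrative; the numbers are assumptions, not taken
 * from any particular platform): with ISA_DMA_THRESHOLD = 0xffffffff,
 * a device claiming coherent_dma_mask = DMA_BIT_MASK(24) gives
 *
 *      (~0x00ffffff) & 0xffffffff = 0xff000000 != 0
 *
 * so the mask is rejected above - GFP_DMA could not guarantee an
 * allocation below 16MiB on such a platform.
 */
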
/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
        unsigned long order = get_order(size);
        struct page *page, *p, *e;
        void *ptr;
        u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
        u64 limit = (mask + 1) & ~mask;
        if (limit && size >= limit) {
                dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
                        size, mask);
                return NULL;
        }
#endif

        if (!mask)
                return NULL;

        if (mask < 0xffffffffULL)
                gfp |= GFP_DMA;

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;

        /*
         * Now split the huge page and free the excess pages
         */
        split_page(page, order);
        for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
                __free_page(p);

        /*
         * Ensure that the allocated pages are zeroed, and that any data
         * lurking in the kernel direct-mapped region is invalidated.
         */
        ptr = page_address(page);
        memset(ptr, 0, size);
        dmac_flush_range(ptr, ptr + size);
        outer_flush_range(__pa(ptr), __pa(ptr) + size);

        return page;
}

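/*
 * Worked example (illustrative): a 20KiB request covers five 4KiB
 * pages, so get_order() rounds up to order 3 (eight pages).  After
 * split_page(), the three excess tail pages are handed back to the
 * page allocator above, leaving exactly the five pages required.
 */
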
/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
        struct page *e = page + (size >> PAGE_SHIFT);

        while (page < e) {
                __free_page(page);
                page++;
        }
}

#ifdef CONFIG_MMU
/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
        .vm_lock        = __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
        .vm_list        = LIST_HEAD_INIT(consistent_head.vm_list),
        .vm_start       = CONSISTENT_BASE,
        .vm_end         = CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
{
        struct page *page;
        struct arm_vmregion *c;

        if (!consistent_pte[0]) {
                printk(KERN_ERR "%s: not initialised\n", __func__);
                dump_stack();
                return NULL;
        }

        size = PAGE_ALIGN(size);

        page = __dma_alloc_buffer(dev, size, gfp);
        if (!page)
                goto no_page;

        /*
         * Allocate a virtual address in the consistent mapping region.
         */
        c = arm_vmregion_alloc(&consistent_head, size,
                            gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
        if (c) {
                pte_t *pte;
                int idx = CONSISTENT_PTE_INDEX(c->vm_start);
                u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

                pte = consistent_pte[idx] + off;
                c->vm_pages = page;

                /*
                 * Set the "dma handle"
                 */
                *handle = page_to_dma(dev, page);

                do {
                        BUG_ON(!pte_none(*pte));

                        /*
                         * x86 does not mark the pages reserved...
                         */
                        SetPageReserved(page);
                        set_pte_ext(pte, mk_pte(page, prot), 0);
                        page++;
                        pte++;
                        off++;
                        if (off >= PTRS_PER_PTE) {
                                off = 0;
                                pte = consistent_pte[++idx];
                        }
                } while (size -= PAGE_SIZE);

                return (void *)c->vm_start;
        }

        if (page)
                __dma_free_buffer(page, size);
 no_page:
        *handle = ~0;
        return NULL;
}
#else   /* !CONFIG_MMU */
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
            pgprot_t prot)
{
        void *virt;
        u64 mask = get_coherent_dma_mask(dev);

        if (!mask)
                goto error;

        if (mask < 0xffffffffULL)
                gfp |= GFP_DMA;
        virt = kmalloc(size, gfp);
        if (!virt)
                goto error;

        *handle = virt_to_dma(dev, virt);
        return virt;

error:
        *handle = ~0;
        return NULL;
}
#endif  /* CONFIG_MMU */

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        void *memory;

        if (dma_alloc_from_coherent(dev, size, handle, &memory))
                return memory;

        if (arch_is_coherent()) {
                struct page *page;

                page = __dma_alloc_buffer(dev, PAGE_ALIGN(size), gfp);
                if (!page) {
                        *handle = ~0;
                        return NULL;
                }

                *handle = page_to_dma(dev, page);
                return page_address(page);
        }

        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_noncached(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);

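/*
 * Typical use from a driver (a minimal sketch - 'pdev' and 'ring_dma'
 * are hypothetical names, not part of this file):
 *
 *      dma_addr_t ring_dma;
 *      void *ring = dma_alloc_coherent(&pdev->dev, SZ_4K,
 *                                      &ring_dma, GFP_KERNEL);
 *      if (!ring)
 *              return -ENOMEM;
 *      ... hand ring_dma to the device, use 'ring' from the CPU ...
 *      dma_free_coherent(&pdev->dev, SZ_4K, ring, ring_dma);
 */
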
/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
        return __dma_alloc(dev, size, handle, gfp,
                           pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);

static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
                    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        int ret = -ENXIO;
#ifdef CONFIG_MMU
        unsigned long user_size, kern_size;
        struct arm_vmregion *c;

        user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

        c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
        if (c) {
                unsigned long off = vma->vm_pgoff;

                kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

                if (off < kern_size &&
                    user_size <= (kern_size - off)) {
                        ret = remap_pfn_range(vma, vma->vm_start,
                                              page_to_pfn(c->vm_pages) + off,
                                              user_size << PAGE_SHIFT,
                                              vma->vm_page_prot);
                }
        }
#endif  /* CONFIG_MMU */

        return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
                      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
                          void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
        vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
        return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);

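/*
 * A minimal sketch of wiring this into a driver's mmap file operation
 * ('mydrv' and its buffer fields are hypothetical):
 *
 *      static int mydrv_mmap(struct file *file, struct vm_area_struct *vma)
 *      {
 *              return dma_mmap_writecombine(mydrv.dev, vma, mydrv.cpu_addr,
 *                                           mydrv.dma_addr, mydrv.size);
 *      }
 */
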
/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
#ifdef CONFIG_MMU
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
        struct arm_vmregion *c;
        unsigned long addr;
        pte_t *ptep;
        int idx;
        u32 off;

        WARN_ON(irqs_disabled());

        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;

        size = PAGE_ALIGN(size);

        if (arch_is_coherent()) {
                __dma_free_buffer(dma_to_page(dev, handle), size);
                return;
        }

        c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
        if (!c)
                goto no_area;

        if ((c->vm_end - c->vm_start) != size) {
                printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
                       __func__, c->vm_end - c->vm_start, size);
                dump_stack();
                size = c->vm_end - c->vm_start;
        }

        idx = CONSISTENT_PTE_INDEX(c->vm_start);
        off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
        ptep = consistent_pte[idx] + off;
        addr = c->vm_start;
        do {
                pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);
                unsigned long pfn;

                ptep++;
                addr += PAGE_SIZE;
                off++;
                if (off >= PTRS_PER_PTE) {
                        off = 0;
                        ptep = consistent_pte[++idx];
                }

                if (!pte_none(pte) && pte_present(pte)) {
                        pfn = pte_pfn(pte);

                        if (pfn_valid(pfn)) {
                                struct page *page = pfn_to_page(pfn);

                                /*
                                 * x86 does not mark the pages reserved...
                                 */
                                ClearPageReserved(page);
                                continue;
                        }
                }
                printk(KERN_CRIT "%s: bad page in kernel page table\n",
                       __func__);
        } while (size -= PAGE_SIZE);

        flush_tlb_kernel_range(c->vm_start, c->vm_end);

        arm_vmregion_free(&consistent_head, c);

        __dma_free_buffer(dma_to_page(dev, handle), size);
        return;

 no_area:
        printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
               __func__, cpu_addr);
        dump_stack();
}
#else   /* !CONFIG_MMU */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
        if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
                return;
        kfree(cpu_addr);
}
#endif  /* CONFIG_MMU */
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
        int ret = 0;
#ifdef CONFIG_MMU
        pgd_t *pgd;
        pmd_t *pmd;
        pte_t *pte;
        int i = 0;
        u32 base = CONSISTENT_BASE;

        do {
                pgd = pgd_offset(&init_mm, base);
                pmd = pmd_alloc(&init_mm, pgd, base);
                if (!pmd) {
                        printk(KERN_ERR "%s: no pmd tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }
                WARN_ON(!pmd_none(*pmd));

                pte = pte_alloc_kernel(pmd, base);
                if (!pte) {
                        printk(KERN_ERR "%s: no pte tables\n", __func__);
                        ret = -ENOMEM;
                        break;
                }

                consistent_pte[i++] = pte;
                base += (1 << PGDIR_SHIFT);
        } while (base < CONSISTENT_END);
#endif  /* CONFIG_MMU */

        return ret;
}

core_initcall(consistent_init);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void dma_cache_maint(const void *start, size_t size, int direction)
{
        void (*inner_op)(const void *, const void *);
        void (*outer_op)(unsigned long, unsigned long);

        BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                inner_op = dmac_inv_range;
                outer_op = outer_inv_range;
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                inner_op = dmac_clean_range;
                outer_op = outer_clean_range;
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                inner_op = dmac_flush_range;
                outer_op = outer_flush_range;
                break;
        default:
                BUG();
        }

        inner_op(start, start + size);
        outer_op(__pa(start), __pa(start) + size);
}
EXPORT_SYMBOL(dma_cache_maint);

static void dma_cache_maint_contiguous(struct page *page, unsigned long offset,
                                       size_t size, int direction)
{
        void *vaddr;
        unsigned long paddr;
        void (*inner_op)(const void *, const void *);
        void (*outer_op)(unsigned long, unsigned long);

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                inner_op = dmac_inv_range;
                outer_op = outer_inv_range;
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                inner_op = dmac_clean_range;
                outer_op = outer_clean_range;
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                inner_op = dmac_flush_range;
                outer_op = outer_flush_range;
                break;
        default:
                BUG();
        }

        if (!PageHighMem(page)) {
                vaddr = page_address(page) + offset;
                inner_op(vaddr, vaddr + size);
        } else {
                vaddr = kmap_high_get(page);
                if (vaddr) {
                        vaddr += offset;
                        inner_op(vaddr, vaddr + size);
                        kunmap_high(page);
                }
        }

        paddr = page_to_phys(page) + offset;
        outer_op(paddr, paddr + size);
}

void dma_cache_maint_page(struct page *page, unsigned long offset,
                          size_t size, int dir)
{
        /*
         * A single sg entry may refer to multiple physically contiguous
         * pages.  But we still need to process highmem pages individually.
         * If highmem is not configured then the bulk of this loop gets
         * optimized out.
         */
        size_t left = size;
        do {
                size_t len = left;
                if (PageHighMem(page) && len + offset > PAGE_SIZE) {
                        if (offset >= PAGE_SIZE) {
                                page += offset / PAGE_SIZE;
                                offset %= PAGE_SIZE;
                        }
                        len = PAGE_SIZE - offset;
                }
                dma_cache_maint_contiguous(page, offset, len, dir);
                offset = 0;
                page++;
                left -= len;
        } while (left);
}
EXPORT_SYMBOL(dma_cache_maint_page);

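/*
 * Worked example for the highmem path above (illustrative): a highmem
 * sg entry with offset 3000 and size 3000 straddles a 4KiB page
 * boundary, so the loop issues two calls to dma_cache_maint_contiguous:
 * first len = 4096 - 3000 = 1096 bytes at offset 3000, then the
 * remaining 1904 bytes at offset 0 of the following page.
 */
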
/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i, j;

        for_each_sg(sg, s, nents, i) {
                s->dma_address = dma_map_page(dev, sg_page(s), s->offset,
                                                s->length, dir);
                if (dma_mapping_error(dev, s->dma_address))
                        goto bad_mapping;
        }
        return nents;

 bad_mapping:
        for_each_sg(sg, s, i, j)
                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
        return 0;
}
EXPORT_SYMBOL(dma_map_sg);

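/*
 * Typical streaming-DMA cycle (a minimal sketch - 'dev', 'sglist' and
 * 'count' are hypothetical names):
 *
 *      int nents = dma_map_sg(dev, sglist, count, DMA_TO_DEVICE);
 *      if (nents == 0)
 *              return -ENOMEM;
 *      ... program the device using sg_dma_address()/sg_dma_len() ...
 *      dma_unmap_sg(dev, sglist, count, DMA_TO_DEVICE);
 */
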
/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
                enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i)
                dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
                                       sg_dma_len(s), dir);
        }
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
                        int nents, enum dma_data_direction dir)
{
        struct scatterlist *s;
        int i;

        for_each_sg(sg, s, nents, i) {
                if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
                                              sg_dma_len(s), dir))
                        continue;

                if (!arch_is_coherent())
                        dma_cache_maint_page(sg_page(s), s->offset,
                                             s->length, dir);
        }
}
EXPORT_SYMBOL(dma_sync_sg_for_device);