/*
 *  linux/arch/arm/mm/dma-mapping.c
 *
 *  Copyright (C) 2000-2004 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  DMA uncached mapping support.
 */
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/list.h>
#include <linux/init.h>
#include <linux/device.h>
#include <linux/dma-mapping.h>
#include <linux/highmem.h>

#include <asm/memory.h>
#include <asm/highmem.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
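
/*
 * Return the coherent DMA mask for 'dev', or ISA_DMA_THRESHOLD when no
 * device is given.  Returns 0 if the mask is unset or cannot be
 * satisfied by a GFP_DMA allocation.
 */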
static u64 get_coherent_dma_mask(struct device *dev)
{
	u64 mask = ISA_DMA_THRESHOLD;

	if (dev) {
		mask = dev->coherent_dma_mask;

		/*
		 * Sanity check the DMA mask - it must be non-zero, and
		 * must be able to be satisfied by a DMA allocation.
		 */
		if (mask == 0) {
			dev_warn(dev, "coherent DMA mask is unset\n");
			return 0;
		}

		if ((~mask) & ISA_DMA_THRESHOLD) {
			dev_warn(dev, "coherent DMA mask %#llx is smaller "
				 "than system GFP_DMA mask %#llx\n",
				 mask, (unsigned long long)ISA_DMA_THRESHOLD);
			return 0;
		}
	}

	return mask;
}

/*
 * Allocate a DMA buffer for 'dev' of size 'size' using the
 * specified gfp mask.  Note that 'size' must be page aligned.
 */
static struct page *__dma_alloc_buffer(struct device *dev, size_t size, gfp_t gfp)
{
	unsigned long order = get_order(size);
	struct page *page, *p, *e;
	void *ptr;
	u64 mask = get_coherent_dma_mask(dev);

#ifdef CONFIG_DMA_API_DEBUG
	u64 limit = (mask + 1) & ~mask;
	if (limit && size >= limit) {
		dev_warn(dev, "coherent allocation too big (requested %#x mask %#llx)\n",
			size, mask);
		return NULL;
	}
#endif

	if (!mask)
		return NULL;

	if (mask < 0xffffffffULL)
		gfp |= GFP_DMA;

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;

	/*
	 * Now split the huge page and free the excess pages
	 */
	split_page(page, order);
	for (p = page + (size >> PAGE_SHIFT), e = page + (1 << order); p < e; p++)
		__free_page(p);

	/*
	 * Ensure that the allocated pages are zeroed, and that any data
	 * lurking in the kernel direct-mapped region is invalidated.
	 */
	ptr = page_address(page);
	memset(ptr, 0, size);
	dmac_flush_range(ptr, ptr + size);
	outer_flush_range(__pa(ptr), __pa(ptr) + size);

	return page;
}

/*
 * Free a DMA buffer.  'size' must be page aligned.
 */
static void __dma_free_buffer(struct page *page, size_t size)
{
	struct page *e = page + (size >> PAGE_SHIFT);

	while (page < e) {
		__free_page(page);
		page++;
	}
}

#ifdef CONFIG_MMU
/* Sanity check size */
#if (CONSISTENT_DMA_SIZE % SZ_2M)
#error "CONSISTENT_DMA_SIZE must be multiple of 2MiB"
#endif

#define CONSISTENT_OFFSET(x)	(((unsigned long)(x) - CONSISTENT_BASE) >> PAGE_SHIFT)
#define CONSISTENT_PTE_INDEX(x) (((unsigned long)(x) - CONSISTENT_BASE) >> PGDIR_SHIFT)
#define NUM_CONSISTENT_PTES (CONSISTENT_DMA_SIZE >> PGDIR_SHIFT)

/*
 * These are the page tables (2MB each) covering uncached, DMA consistent allocations
 */
static pte_t *consistent_pte[NUM_CONSISTENT_PTES];

#include "vmregion.h"

static struct arm_vmregion_head consistent_head = {
	.vm_lock	= __SPIN_LOCK_UNLOCKED(&consistent_head.vm_lock),
	.vm_list	= LIST_HEAD_INIT(consistent_head.vm_list),
	.vm_start	= CONSISTENT_BASE,
	.vm_end		= CONSISTENT_END,
};

#ifdef CONFIG_HUGETLB_PAGE
#error ARM Coherent DMA allocator does not (yet) support huge TLB
#endif

/*
 * Initialise the consistent memory allocation.
 */
static int __init consistent_init(void)
{
	int ret = 0;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i = 0;
	u32 base = CONSISTENT_BASE;

	do {
		pgd = pgd_offset(&init_mm, base);

		pud = pud_alloc(&init_mm, pgd, base);
		if (!pud) {
			printk(KERN_ERR "%s: no pud tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		pmd = pmd_alloc(&init_mm, pud, base);
		if (!pmd) {
			printk(KERN_ERR "%s: no pmd tables\n", __func__);
			ret = -ENOMEM;
			break;
		}
		WARN_ON(!pmd_none(*pmd));

		pte = pte_alloc_kernel(pmd, base);
		if (!pte) {
			printk(KERN_ERR "%s: no pte tables\n", __func__);
			ret = -ENOMEM;
			break;
		}

		consistent_pte[i++] = pte;
		base += (1 << PGDIR_SHIFT);
	} while (base < CONSISTENT_END);

	return ret;
}

core_initcall(consistent_init);
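
/*
 * Map the given pages into the consistent region with the requested
 * page protection and return the remapped virtual address.  'size'
 * must be page aligned.
 */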
static void *
__dma_alloc_remap(struct page *page, size_t size, gfp_t gfp, pgprot_t prot)
{
	struct arm_vmregion *c;
	size_t align;
	int bit;

	if (!consistent_pte[0]) {
		printk(KERN_ERR "%s: not initialised\n", __func__);
		dump_stack();
		return NULL;
	}

	/*
	 * Align the virtual region allocation - maximum alignment is
	 * a section size, minimum is a page size.  This helps reduce
	 * fragmentation of the DMA space, and also prevents allocations
	 * smaller than a section from crossing a section boundary.
	 */
	bit = fls(size - 1);
	if (bit > SECTION_SHIFT)
		bit = SECTION_SHIFT;
	align = 1 << bit;

	/*
	 * Allocate a virtual address in the consistent mapping region.
	 */
	c = arm_vmregion_alloc(&consistent_head, align, size,
			    gfp & ~(__GFP_DMA | __GFP_HIGHMEM));
	if (c) {
		pte_t *pte;
		int idx = CONSISTENT_PTE_INDEX(c->vm_start);
		u32 off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);

		pte = consistent_pte[idx] + off;
		c->vm_pages = page;

		do {
			BUG_ON(!pte_none(*pte));

			set_pte_ext(pte, mk_pte(page, prot), 0);
			page++;
			pte++;
			off++;
			if (off >= PTRS_PER_PTE) {
				off = 0;
				pte = consistent_pte[++idx];
			}
		} while (size -= PAGE_SIZE);

		dsb();

		return (void *)c->vm_start;
	}
	return NULL;
}
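
/*
 * Tear down the consistent-region mapping created by __dma_alloc_remap()
 * and release the virtual region.  'size' must be page aligned.
 */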
static void __dma_free_remap(void *cpu_addr, size_t size)
{
	struct arm_vmregion *c;
	unsigned long addr;
	pte_t *ptep;
	int idx;
	u32 off;

	c = arm_vmregion_find_remove(&consistent_head, (unsigned long)cpu_addr);
	if (!c) {
		printk(KERN_ERR "%s: trying to free invalid coherent area: %p\n",
		       __func__, cpu_addr);
		dump_stack();
		return;
	}

	if ((c->vm_end - c->vm_start) != size) {
		printk(KERN_ERR "%s: freeing wrong coherent size (%ld != %d)\n",
		       __func__, c->vm_end - c->vm_start, size);
		dump_stack();
		size = c->vm_end - c->vm_start;
	}

	idx = CONSISTENT_PTE_INDEX(c->vm_start);
	off = CONSISTENT_OFFSET(c->vm_start) & (PTRS_PER_PTE-1);
	ptep = consistent_pte[idx] + off;
	addr = c->vm_start;
	do {
		pte_t pte = ptep_get_and_clear(&init_mm, addr, ptep);

		ptep++;
		addr += PAGE_SIZE;
		off++;
		if (off >= PTRS_PER_PTE) {
			off = 0;
			ptep = consistent_pte[++idx];
		}

		if (pte_none(pte) || !pte_present(pte))
			printk(KERN_CRIT "%s: bad page in kernel page table\n",
			       __func__);
	} while (size -= PAGE_SIZE);

	flush_tlb_kernel_range(c->vm_start, c->vm_end);

	arm_vmregion_free(&consistent_head, c);
}

#else	/* !CONFIG_MMU */

#define __dma_alloc_remap(page, size, gfp, prot)	page_address(page)
#define __dma_free_remap(addr, size)			do { } while (0)

#endif	/* CONFIG_MMU */
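
/*
 * Allocate a buffer of 'size' bytes and, unless the architecture is
 * DMA-coherent, remap it with the requested page protection.  Returns
 * the CPU address and fills in the DMA handle.
 */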
static void *
__dma_alloc(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp,
	    pgprot_t prot)
{
	struct page *page;
	void *addr;

	*handle = ~0;
	size = PAGE_ALIGN(size);

	page = __dma_alloc_buffer(dev, size, gfp);
	if (!page)
		return NULL;

	if (!arch_is_coherent())
		addr = __dma_alloc_remap(page, size, gfp, prot);
	else
		addr = page_address(page);

	if (addr)
		*handle = pfn_to_dma(dev, page_to_pfn(page));

	return addr;
}

/*
 * Allocate DMA-coherent memory space and return both the kernel remapped
 * virtual and bus address for that space.
 */
void *
dma_alloc_coherent(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	void *memory;

	if (dma_alloc_from_coherent(dev, size, handle, &memory))
		return memory;

	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_dmacoherent(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_coherent);
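
/*
 * Illustrative example of how a driver might use the allocator above
 * (assumes a valid 'dev' with a suitable coherent DMA mask):
 *
 *	dma_addr_t dma;
 *	void *cpu = dma_alloc_coherent(dev, SZ_4K, &dma, GFP_KERNEL);
 *	if (cpu) {
 *		... hand 'dma' to the device, access the buffer via 'cpu' ...
 *		dma_free_coherent(dev, SZ_4K, cpu, dma);
 *	}
 */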

/*
 * Allocate a writecombining region, in much the same way as
 * dma_alloc_coherent above.
 */
void *
dma_alloc_writecombine(struct device *dev, size_t size, dma_addr_t *handle, gfp_t gfp)
{
	return __dma_alloc(dev, size, handle, gfp,
			   pgprot_writecombine(pgprot_kernel));
}
EXPORT_SYMBOL(dma_alloc_writecombine);
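
/*
 * Common helper for dma_mmap_coherent() and dma_mmap_writecombine():
 * remap a previously allocated consistent region into the given
 * userspace vma.
 */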
static int dma_mmap(struct device *dev, struct vm_area_struct *vma,
		    void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	int ret = -ENXIO;
#ifdef CONFIG_MMU
	unsigned long user_size, kern_size;
	struct arm_vmregion *c;

	user_size = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;

	c = arm_vmregion_find(&consistent_head, (unsigned long)cpu_addr);
	if (c) {
		unsigned long off = vma->vm_pgoff;

		kern_size = (c->vm_end - c->vm_start) >> PAGE_SHIFT;

		if (off < kern_size &&
		    user_size <= (kern_size - off)) {
			ret = remap_pfn_range(vma, vma->vm_start,
					      page_to_pfn(c->vm_pages) + off,
					      user_size << PAGE_SHIFT,
					      vma->vm_page_prot);
		}
	}
#endif	/* CONFIG_MMU */

	return ret;
}

int dma_mmap_coherent(struct device *dev, struct vm_area_struct *vma,
		      void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_dmacoherent(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_coherent);

int dma_mmap_writecombine(struct device *dev, struct vm_area_struct *vma,
			  void *cpu_addr, dma_addr_t dma_addr, size_t size)
{
	vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
	return dma_mmap(dev, vma, cpu_addr, dma_addr, size);
}
EXPORT_SYMBOL(dma_mmap_writecombine);

/*
 * free a page as defined by the above mapping.
 * Must not be called with IRQs disabled.
 */
void dma_free_coherent(struct device *dev, size_t size, void *cpu_addr, dma_addr_t handle)
{
	WARN_ON(irqs_disabled());

	if (dma_release_from_coherent(dev, get_order(size), cpu_addr))
		return;

	size = PAGE_ALIGN(size);

	if (!arch_is_coherent())
		__dma_free_remap(cpu_addr, size);

	__dma_free_buffer(pfn_to_page(dma_to_pfn(dev, handle)), size);
}
EXPORT_SYMBOL(dma_free_coherent);

/*
 * Make an area consistent for devices.
 * Note: Drivers should NOT use this function directly, as it will break
 * platforms with CONFIG_DMABOUNCE.
 * Use the driver DMA support - see dma-mapping.h (dma_sync_*)
 */
void ___dma_single_cpu_to_dev(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	unsigned long paddr;

	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	dmac_map_area(kaddr, size, dir);

	paddr = __pa(kaddr);
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_single_cpu_to_dev);

void ___dma_single_dev_to_cpu(const void *kaddr, size_t size,
	enum dma_data_direction dir)
{
	BUG_ON(!virt_addr_valid(kaddr) || !virt_addr_valid(kaddr + size - 1));

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE) {
		unsigned long paddr = __pa(kaddr);
		outer_inv_range(paddr, paddr + size);
	}

	dmac_unmap_area(kaddr, size, dir);
}
EXPORT_SYMBOL(___dma_single_dev_to_cpu);
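
/*
 * Apply the cache maintenance operation 'op' to the (possibly highmem)
 * buffer described by page/offset/size, mapping each page temporarily
 * when it has no permanent kernel mapping.
 */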
static void dma_cache_maint_page(struct page *page, unsigned long offset,
	size_t size, enum dma_data_direction dir,
	void (*op)(const void *, size_t, int))
{
	/*
	 * A single sg entry may refer to multiple physically contiguous
	 * pages.  But we still need to process highmem pages individually.
	 * If highmem is not configured then the bulk of this loop gets
	 * optimized out.
	 */
	size_t left = size;
	do {
		size_t len = left;
		void *vaddr;

		if (PageHighMem(page)) {
			if (len + offset > PAGE_SIZE) {
				if (offset >= PAGE_SIZE) {
					page += offset / PAGE_SIZE;
					offset %= PAGE_SIZE;
				}
				len = PAGE_SIZE - offset;
			}
			vaddr = kmap_high_get(page);
			if (vaddr) {
				vaddr += offset;
				op(vaddr, len, dir);
				kunmap_high(page);
			} else if (cache_is_vipt()) {
				/* unmapped pages might still be cached */
				vaddr = kmap_atomic(page);
				op(vaddr + offset, len, dir);
				kunmap_atomic(vaddr);
			}
		} else {
			vaddr = page_address(page) + offset;
			op(vaddr, len, dir);
		}
		offset = 0;
		page++;
		left -= len;
	} while (left);
}
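
/*
 * The two helpers below make an area of a page consistent for device
 * access: inner cache maintenance via dma_cache_maint_page() plus the
 * matching outer cache operation for the direction of transfer.
 */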
void ___dma_page_cpu_to_dev(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr;

	dma_cache_maint_page(page, off, size, dir, dmac_map_area);

	paddr = page_to_phys(page) + off;
	if (dir == DMA_FROM_DEVICE) {
		outer_inv_range(paddr, paddr + size);
	} else {
		outer_clean_range(paddr, paddr + size);
	}
	/* FIXME: non-speculating: flush on bidirectional mappings? */
}
EXPORT_SYMBOL(___dma_page_cpu_to_dev);

void ___dma_page_dev_to_cpu(struct page *page, unsigned long off,
	size_t size, enum dma_data_direction dir)
{
	unsigned long paddr = page_to_phys(page) + off;

	/* FIXME: non-speculating: not required */
	/* don't bother invalidating if DMA to device */
	if (dir != DMA_TO_DEVICE)
		outer_inv_range(paddr, paddr + size);

	dma_cache_maint_page(page, off, size, dir, dmac_unmap_area);

	/*
	 * Mark the D-cache clean for this page to avoid extra flushing.
	 */
	if (dir != DMA_TO_DEVICE && off == 0 && size >= PAGE_SIZE)
		set_bit(PG_dcache_clean, &page->flags);
}
EXPORT_SYMBOL(___dma_page_dev_to_cpu);

/**
 * dma_map_sg - map a set of SG buffers for streaming mode DMA
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map
 * @dir: DMA transfer direction
 *
 * Map a set of buffers described by scatterlist in streaming mode for DMA.
 * This is the scatter-gather version of the dma_map_single interface.
 * Here the scatter gather list elements are each tagged with the
 * appropriate dma address and length.  They are obtained via
 * sg_dma_{address,length}.
 *
 * Device ownership issues as mentioned for dma_map_single are the same
 * here.
 */
int dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i, j;

	BUG_ON(!valid_dma_direction(dir));

	for_each_sg(sg, s, nents, i) {
		s->dma_address = __dma_map_page(dev, sg_page(s), s->offset,
						s->length, dir);
		if (dma_mapping_error(dev, s->dma_address))
			goto bad_mapping;
	}
	debug_dma_map_sg(dev, sg, nents, nents, dir);
	return nents;

 bad_mapping:
	for_each_sg(sg, s, i, j)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
	return 0;
}
EXPORT_SYMBOL(dma_map_sg);

/**
 * dma_unmap_sg - unmap a set of SG buffers mapped by dma_map_sg
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to unmap (same as was passed to dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 *
 * Unmap a set of streaming mode DMA translations.  Again, CPU access
 * rules concerning calls here are the same as for dma_unmap_single().
 */
void dma_unmap_sg(struct device *dev, struct scatterlist *sg, int nents,
		enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	debug_dma_unmap_sg(dev, sg, nents, dir);

	for_each_sg(sg, s, nents, i)
		__dma_unmap_page(dev, sg_dma_address(s), sg_dma_len(s), dir);
}
EXPORT_SYMBOL(dma_unmap_sg);

/**
 * dma_sync_sg_for_cpu
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_cpu(dev, sg_dma_address(s), 0,
					    sg_dma_len(s), dir))
			continue;

		__dma_page_dev_to_cpu(sg_page(s), s->offset,
				      s->length, dir);
	}

	debug_dma_sync_sg_for_cpu(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_cpu);

/**
 * dma_sync_sg_for_device
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @sg: list of buffers
 * @nents: number of buffers to map (returned from dma_map_sg)
 * @dir: DMA transfer direction (same as was passed to dma_map_sg)
 */
void dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
			int nents, enum dma_data_direction dir)
{
	struct scatterlist *s;
	int i;

	for_each_sg(sg, s, nents, i) {
		if (!dmabounce_sync_for_device(dev, sg_dma_address(s), 0,
					sg_dma_len(s), dir))
			continue;

		__dma_page_cpu_to_dev(sg_page(s), s->offset,
				      s->length, dir);
	}

	debug_dma_sync_sg_for_device(dev, sg, nents, dir);
}
EXPORT_SYMBOL(dma_sync_sg_for_device);

#define PREALLOC_DMA_DEBUG_ENTRIES	4096

static int __init dma_debug_do_init(void)
{
	dma_debug_init(PREALLOC_DMA_DEBUG_ENTRIES);
	return 0;
}
fs_initcall(dma_debug_do_init);