/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
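
/*
 * Usage sketch (not part of the original file; my_dev, buf, len and
 * dma_handle are made-up names).  dma_cache_sync() is typically used
 * with memory obtained from dma_alloc_noncoherent(), e.g. to clean the
 * cache before handing a buffer to the device:
 *
 *	void *buf = dma_alloc_noncoherent(my_dev, len, &dma_handle,
 *					  GFP_KERNEL);
 *	if (!buf)
 *		return -ENOMEM;
 *	...fill buf with data for the device...
 *	dma_cache_sync(my_dev, buf, len, DMA_TO_DEVICE);
 *	...start the transfer using dma_handle...
 */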

static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them. The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}
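
/*
 * Worked example for __dma_alloc() above (illustration only, assuming
 * 4 KiB pages): a 20 KiB request stays 20 KiB after PAGE_ALIGN() and
 * needs an order-3 (8-page) allocation.  After split_page(), the three
 * unused tail pages (indices 5..7) are handed back to the allocator,
 * so only the five pages actually covering the buffer stay allocated.
 */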

static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}

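/*
 * Allocate a DMA buffer and return a CPU pointer into the uncached P2
 * segment, so no explicit cache maintenance is needed while the CPU
 * and the device share it.  The device-visible bus address is returned
 * via @handle.
 */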
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
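
/*
 * Usage sketch (illustration only; my_dev, ring and ring_dma are
 * made-up names):
 *
 *	dma_addr_t ring_dma;
 *	void *ring = dma_alloc_coherent(my_dev, PAGE_SIZE, &ring_dma,
 *					GFP_KERNEL);
 *	if (!ring)
 *		return -ENOMEM;
 *	...program the device with ring_dma, touch ring from the CPU...
 *	dma_free_coherent(my_dev, PAGE_SIZE, ring, ring_dma);
 *
 * dma_free_coherent() below converts the uncached address back to its
 * cached alias before releasing the underlying pages.
 */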

void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

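/*
 * Like dma_alloc_coherent(), but the CPU mapping is created in the P3
 * segment with __ioremap() and _PAGE_BUFFER set, so the buffer is
 * accessed with write-combining enabled rather than through the plain
 * uncached P2 segment.
 */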
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
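
/*
 * Usage sketch (illustration only; my_dev, fb_size, fb and fb_dma are
 * made-up names).  A typical user is something like a frame buffer
 * driver:
 *
 *	dma_addr_t fb_dma;
 *	void *fb = dma_alloc_writecombine(my_dev, fb_size, &fb_dma,
 *					  GFP_KERNEL);
 *	if (!fb)
 *		return -ENOMEM;
 *	...point the controller at fb_dma, draw through fb...
 *	dma_free_writecombine(my_dev, fb_size, fb, fb_dma);
 *
 * dma_free_writecombine() below undoes the P3 mapping with iounmap()
 * and releases the pages looked up from the bus address.
 */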

void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);