/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>
#include <linux/gfp.h>
#include <linux/export.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>
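
/*
 * Perform CPU cache maintenance on a buffer used for streaming DMA,
 * according to the transfer direction: invalidate for DMA_FROM_DEVICE,
 * clean (write back) for DMA_TO_DEVICE, and flush (both) for
 * DMA_BIDIRECTIONAL.  Addresses in the uncached P2 segment need no
 * maintenance.
 */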
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
	/*
	 * No need to sync an uncached area
	 */
	if (PXSEG(vaddr) == P2SEG)
		return;

	switch (direction) {
	case DMA_FROM_DEVICE:		/* invalidate only */
		invalidate_dcache_region(vaddr, size);
		break;
	case DMA_TO_DEVICE:		/* writeback only */
		clean_dcache_region(vaddr, size);
		break;
	case DMA_BIDIRECTIONAL:		/* writeback and invalidate */
		flush_dcache_region(vaddr, size);
		break;
	default:
		BUG();
	}
}
EXPORT_SYMBOL(dma_cache_sync);
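
/*
 * Allocate the pages backing a DMA buffer.  The request is rounded up
 * to a power-of-two number of pages; split_page() turns the high-order
 * allocation into individual order-0 pages so the unused tail can be
 * returned right away.  The D-cache is invalidated over the region so
 * that no stale, valid cache lines shadow the memory once it is
 * accessed through an uncached mapping.
 */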
static struct page *__dma_alloc(struct device *dev, size_t size,
				dma_addr_t *handle, gfp_t gfp)
{
	struct page *page, *free, *end;
	int order;

	/* Following is a work-around (a.k.a. hack) to prevent pages
	 * with __GFP_COMP being passed to split_page() which cannot
	 * handle them.  The real problem is that this flag probably
	 * should be 0 on AVR32 as it is not supported on this
	 * platform--see CONFIG_HUGETLB_PAGE. */
	gfp &= ~(__GFP_COMP);

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		return NULL;
	split_page(page, order);

	/*
	 * When accessing physical memory with valid cache data, we
	 * get a cache hit even if the virtual memory region is marked
	 * as uncached.
	 *
	 * Since the memory is newly allocated, there is no point in
	 * doing a writeback. If the previous owner cares, he should
	 * have flushed the cache before releasing the memory.
	 */
	invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

	*handle = page_to_bus(page);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

	/*
	 * Free any unused pages
	 */
	while (free < end) {
		__free_page(free);
		free++;
	}

	return page;
}
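
/*
 * Release the pages behind a DMA buffer allocated by __dma_alloc().
 * Only the pages covering the page-aligned size are freed here; any
 * excess pages from rounding the allocation up to a power of two were
 * already returned by __dma_alloc() itself.
 */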
static void __dma_free(struct device *dev, size_t size,
		       struct page *page, dma_addr_t handle)
{
	struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

	while (page < end)
		__free_page(page++);
}
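
/*
 * Allocate a coherent buffer and return its uncached (P2 segment)
 * virtual address.  The corresponding bus address is returned through
 * @handle.
 */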
void *dma_alloc_coherent(struct device *dev, size_t size,
			 dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	void *ret = NULL;

	page = __dma_alloc(dev, size, handle, gfp);
	if (page)
		ret = phys_to_uncached(page_to_phys(page));

	return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);
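
/*
 * Free a buffer obtained from dma_alloc_coherent().  The uncached P2
 * address handed out by the allocator is translated back to its cached
 * (P1) alias, which is the address virt_to_page() expects.
 */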
void dma_free_coherent(struct device *dev, size_t size,
		       void *cpu_addr, dma_addr_t handle)
{
	void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
	struct page *page;

	pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
		 cpu_addr, (unsigned long)handle, (unsigned)size);
	BUG_ON(!virt_addr_valid(addr));
	page = virt_to_page(addr);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);
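
/*
 * Like dma_alloc_coherent(), but the buffer is mapped into the P3
 * segment with the buffered (write-combining) page attribute instead
 * of using the uncached P2 alias.  Note that the handle set up by
 * __dma_alloc() is overwritten with the physical address of the pages.
 */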
void *dma_alloc_writecombine(struct device *dev, size_t size,
			     dma_addr_t *handle, gfp_t gfp)
{
	struct page *page;
	dma_addr_t phys;

	page = __dma_alloc(dev, size, handle, gfp);
	if (!page)
		return NULL;

	phys = page_to_phys(page);
	*handle = phys;

	/* Now, map the page into P3 with write-combining turned on */
	return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);
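
/*
 * Free a buffer obtained from dma_alloc_writecombine(): tear down the
 * P3 mapping, then release the underlying pages located via the DMA
 * handle.
 */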
void dma_free_writecombine(struct device *dev, size_t size,
			   void *cpu_addr, dma_addr_t handle)
{
	struct page *page;

	iounmap(cpu_addr);

	page = phys_to_page(handle);
	__dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);