/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/dma-mapping.h>

#include <asm/addrspace.h>
#include <asm/cacheflush.h>

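/*
 * Synchronize the data cache with memory for a streaming DMA transfer.
 * Addresses in the uncached P2 segment never hit the cache and are left
 * alone; cached addresses are invalidated, cleaned (written back) or
 * flushed depending on the transfer direction.
 *
 * For illustration only (assuming a driver-owned buffer 'buf' of 'len'
 * bytes that the device is about to read):
 *
 *	dma_cache_sync(dev, buf, len, DMA_TO_DEVICE);
 */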
void dma_cache_sync(struct device *dev, void *vaddr, size_t size, int direction)
{
        /*
         * No need to sync an uncached area
         */
        if (PXSEG(vaddr) == P2SEG)
                return;

        switch (direction) {
        case DMA_FROM_DEVICE:           /* invalidate only */
                invalidate_dcache_region(vaddr, size);
                break;
        case DMA_TO_DEVICE:             /* writeback only */
                clean_dcache_region(vaddr, size);
                break;
        case DMA_BIDIRECTIONAL:         /* writeback and invalidate */
                flush_dcache_region(vaddr, size);
                break;
        default:
                BUG();
        }
}
EXPORT_SYMBOL(dma_cache_sync);

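/*
 * Allocate pages for a DMA buffer.  The request is rounded up to a
 * power-of-two number of pages; any cache lines covering the new pages
 * are invalidated so that later uncached accesses cannot hit stale
 * cached data, the bus address of the first page is returned through
 * @handle, and the pages beyond the (page-aligned) requested size are
 * given back to the page allocator.
 */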
static struct page *__dma_alloc(struct device *dev, size_t size,
                                dma_addr_t *handle, gfp_t gfp)
{
        struct page *page, *free, *end;
        int order;

        size = PAGE_ALIGN(size);
        order = get_order(size);

        page = alloc_pages(gfp, order);
        if (!page)
                return NULL;
        split_page(page, order);

        /*
         * When accessing physical memory with valid cache data, we
         * get a cache hit even if the virtual memory region is marked
         * as uncached.
         *
         * Since the memory is newly allocated, there is no point in
         * doing a writeback. If the previous owner cares, he should
         * have flushed the cache before releasing the memory.
         */
        invalidate_dcache_region(phys_to_virt(page_to_phys(page)), size);

        *handle = page_to_bus(page);
        free = page + (size >> PAGE_SHIFT);
        end = page + (1 << order);

        /*
         * Free any unused pages
         */
        while (free < end) {
                __free_page(free);
                free++;
        }

        return page;
}

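/*
 * Free pages previously obtained from __dma_alloc().  Only the pages
 * actually kept by __dma_alloc() (PAGE_ALIGN(size) bytes worth) are
 * released here; the unused tail of the allocation was already freed
 * at allocation time.
 */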
static void __dma_free(struct device *dev, size_t size,
                       struct page *page, dma_addr_t handle)
{
        struct page *end = page + (PAGE_ALIGN(size) >> PAGE_SHIFT);

        while (page < end)
                __free_page(page++);
}

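/*
 * Allocate a coherent DMA buffer.  The backing pages come from
 * __dma_alloc(); the CPU pointer handed back to the caller is the
 * uncached alias of those pages, so no further cache maintenance is
 * needed for accesses through it.
 *
 * For illustration only (the device pointer and size below are
 * assumptions, not taken from this file):
 *
 *	dma_addr_t dma;
 *	void *buf = dma_alloc_coherent(dev, 4096, &dma, GFP_KERNEL);
 *	if (buf) {
 *		// program the device with 'dma', access 'buf' from the CPU
 *		dma_free_coherent(dev, 4096, buf, dma);
 *	}
 */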
void *dma_alloc_coherent(struct device *dev, size_t size,
                         dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        void *ret = NULL;

        page = __dma_alloc(dev, size, handle, gfp);
        if (page)
                ret = phys_to_uncached(page_to_phys(page));

        return ret;
}
EXPORT_SYMBOL(dma_alloc_coherent);

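/*
 * Free a buffer obtained from dma_alloc_coherent().  The uncached
 * cookie given to the caller is translated back to its cached alias to
 * recover the struct page before the pages are released.
 */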
void dma_free_coherent(struct device *dev, size_t size,
                       void *cpu_addr, dma_addr_t handle)
{
        void *addr = phys_to_cached(uncached_to_phys(cpu_addr));
        struct page *page;

        pr_debug("dma_free_coherent addr %p (phys %08lx) size %u\n",
                 cpu_addr, (unsigned long)handle, (unsigned)size);
        BUG_ON(!virt_addr_valid(addr));
        page = virt_to_page(addr);
        __dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_coherent);

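/*
 * Allocate a write-combining DMA buffer.  The pages are obtained the
 * same way as for dma_alloc_coherent(), but instead of using the
 * uncached segment they are mapped into P3 by __ioremap() with the
 * buffer (write-combine) bit set.  The DMA handle is the physical
 * address of the first page.
 */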
void *dma_alloc_writecombine(struct device *dev, size_t size,
                             dma_addr_t *handle, gfp_t gfp)
{
        struct page *page;
        dma_addr_t phys;

        page = __dma_alloc(dev, size, handle, gfp);
        if (!page)
                return NULL;

        phys = page_to_phys(page);
        *handle = phys;

        /* Now, map the page into P3 with write-combining turned on */
        return __ioremap(phys, size, _PAGE_BUFFER);
}
EXPORT_SYMBOL(dma_alloc_writecombine);

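/*
 * Free a buffer obtained from dma_alloc_writecombine(): tear down the
 * P3 mapping and release the underlying pages, which are found from
 * the DMA handle (a physical address for write-combine allocations).
 */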
void dma_free_writecombine(struct device *dev, size_t size,
                           void *cpu_addr, dma_addr_t handle)
{
        struct page *page;

        iounmap(cpu_addr);

        page = phys_to_page(handle);
        __dma_free(dev, size, page, handle);
}
EXPORT_SYMBOL(dma_free_writecombine);