/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <asm/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>

#ifndef CONFIG_MMU

/* I have to use dcache values because I can't rely on the RAM size */
#define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region. So, memory accessed
 * in this mirror region will not be cached. It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
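
/*
 * Illustrative sketch (hypothetical numbers, not from this file): with
 * cpuinfo.dcache_base = 0x00000000 and cpuinfo.dcache_high = 0x0fffffff
 * (a 256 MB cacheable region), UNCACHED_SHADOW_MASK works out to
 * 0x10000000, so OR-ing it into a cached kernel address yields the same
 * memory in the uncached mirror:
 *
 *	0x00123000 | 0x10000000 == 0x10123000
 *
 * The hardware must actually implement the mirror at that offset for
 * this to work; see CONFIG_XILINX_UNCACHED_SHADOW below.
 */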
void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	struct page *page, *end, *free;
	unsigned long order;
	void *ret, *virt;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = alloc_pages(gfp, order);
	if (!page)
		goto no_page;

	/* We could do with a page_to_phys and page_to_bus here. */
	virt = page_address(page);
	ret = ioremap(virt_to_phys(virt), size);
	if (!ret)
		goto no_remap;

	/*
	 * Here's the magic! Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
#endif
	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;

	/*
	 * Free the wasted pages beyond the requested size. We reset the
	 * count on every page in the block, and mark the pages still in
	 * use as reserved so that remap_page_range works.
	 */
	page = virt_to_page(virt);
	free = page + (size >> PAGE_SHIFT);
	end = page + (1 << order);

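	/*
	 * Worked example (hypothetical sizes): for size = 3 pages the
	 * allocator rounds up to order = 2, i.e. a 4-page block. 'free'
	 * then points at page 3 and 'end' at page 4, so the loop below
	 * reserves pages 0..2 and gives page 3 back to the allocator.
	 */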
	for (; page < end; page++) {
		init_page_count(page);
		if (page >= free)
			__free_page(page);
		else
			SetPageReserved(page);
	}

	return ret;
no_remap:
	__free_pages(page, order);
no_page:
	return NULL;
}

#else

void *consistent_alloc(int gfp, size_t size, dma_addr_t *dma_handle)
{
	int order, err, i;
	unsigned long page, va, flags;
	phys_addr_t pa;
	struct vm_struct *area;
	void *ret;

	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	page = __get_free_pages(gfp, order);
	if (!page) {
		BUG();
		return NULL;
	}

	/*
	 * We need to ensure that there are no cachelines in use,
	 * or, worse, dirty in this area.
	 */
	flush_dcache_range(virt_to_phys(page), virt_to_phys(page) + size);

	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (area == NULL) {
		free_pages(page, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = virt_to_bus((void *)page);

	/* MS: This is the whole magic - use cache inhibit pages */
	flags = _PAGE_KERNEL | _PAGE_NO_CACHE;

	/*
	 * Set refcount=1 on all pages in an order>0
	 * allocation so that vfree() will actually
	 * free all pages that were allocated.
	 */
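	/*
	 * (Illustration, assuming a hypothetical order-2 request: the
	 * 4-page block comes back with only its head page holding a
	 * count of 1, so the loop below gives pages 1..3 a count of 1
	 * as well, letting each page be released individually later.)
	 */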
	if (order > 0) {
		struct page *rpage = virt_to_page(page);

		for (i = 1; i < (1 << order); i++)
			init_page_count(rpage + i);
	}

	err = 0;
	for (i = 0; i < size && err == 0; i += PAGE_SIZE)
		err = map_page(va + i, pa + i, flags);

	if (err) {
		vfree((void *)va);
		return NULL;
	}

	return ret;
}
#endif /* CONFIG_MMU */
EXPORT_SYMBOL(consistent_alloc);
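
/*
 * Usage sketch (illustrative only, not part of this file): a driver
 * pairs consistent_alloc() with consistent_free(), e.g.
 *
 *	dma_addr_t handle;
 *	void *buf = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &handle);
 *
 *	if (!buf)
 *		return -ENOMEM;
 *	... program the device with 'handle', CPU accesses 'buf' ...
 *	consistent_free(buf);
 */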

/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(void *vaddr)
{
	if (in_interrupt())
		BUG();

	/* Clear the UNCACHED_SHADOW_MASK bits in the address and free as usual */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
#endif
	vfree(vaddr);
}
EXPORT_SYMBOL(consistent_free);

/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert start address back down to unshadowed memory region */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

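	/*
	 * Note: all three DMA directions below resolve to the same
	 * flush_dcache_range() call; on this port the dcache flush
	 * covers write-back and invalidation alike, so no per-direction
	 * distinction is made (the case comments record the intent).
	 */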
	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(consistent_sync);

/*
 * consistent_sync_page makes memory consistent; it is identical
 * to consistent_sync, but takes a struct page instead of a
 * virtual address.
 */
void consistent_sync_page(struct page *page, unsigned long offset,
	size_t size, int direction)
{
	unsigned long start = (unsigned long)page_address(page) + offset;

	consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);