/*
 * Microblaze support for cache consistent memory.
 * Copyright (C) 2010 Michal Simek <monstr@monstr.eu>
 * Copyright (C) 2010 PetaLogix
 * Copyright (C) 2005 John Williams <jwilliams@itee.uq.edu.au>
 *
 * Based on PowerPC version derived from arch/arm/mm/consistent.c
 * Copyright (C) 2001 Dan Malek (dmalek@jlc.net)
 * Copyright (C) 2000 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/export.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/types.h>
#include <linux/ptrace.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/stddef.h>
#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/gfp.h>

#include <asm/pgalloc.h>
#include <linux/io.h>
#include <linux/hardirq.h>
#include <linux/mmu_context.h>
#include <asm/mmu.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/cpuinfo.h>
#include <asm/tlbflush.h>

#ifndef CONFIG_MMU
/* I have to use dcache values because I can't rely on the RAM size */
# define UNCACHED_SHADOW_MASK (cpuinfo.dcache_high - cpuinfo.dcache_base + 1)
#endif

/*
 * Consistent memory allocators. Used for DMA devices that want to
 * share uncached memory with the processor core.
 * My crufty no-MMU approach is simple. In the HW platform we can optionally
 * mirror the DDR up above the processor cacheable region. So, memory accessed
 * in this mirror region will not be cached. It's allocated from the same
 * pool as normal memory, but the handle we return is shifted up into the
 * uncached region. This will no doubt cause big problems if memory allocated
 * here is not also freed properly. -- JW
 */
void *consistent_alloc(gfp_t gfp, size_t size, dma_addr_t *dma_handle)
{
	unsigned long order, vaddr;
	void *ret;
	unsigned int i, err = 0;
	struct page *page, *end;

#ifdef CONFIG_MMU
	phys_addr_t pa;
	struct vm_struct *area;
	unsigned long va;
#endif

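	/*
	 * The MMU path sets up a vmalloc-space mapping and may sleep,
	 * so refuse to run in interrupt context at all.
	 */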
	if (in_interrupt())
		BUG();

	/* Only allocate page size areas. */
	size = PAGE_ALIGN(size);
	order = get_order(size);

	vaddr = __get_free_pages(gfp, order);
	if (!vaddr)
		return NULL;

	/*
	 * We need to ensure that there are no cachelines in use,
	 * or worse dirty, in this area.
	 */
	flush_dcache_range(virt_to_phys((void *)vaddr),
			   virt_to_phys((void *)vaddr) + size);

#ifndef CONFIG_MMU
	ret = (void *)vaddr;
	/*
	 * Here's the magic! Note if the uncached shadow is not implemented,
	 * it's up to the calling code to also test that condition and make
	 * other arrangements, such as manually flushing the cache and so on.
	 */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	ret = (void *)((unsigned) ret | UNCACHED_SHADOW_MASK);
# endif
	if ((unsigned int)ret > cpuinfo.dcache_base &&
	    (unsigned int)ret < cpuinfo.dcache_high)
		pr_warn("ERROR: Your cache coherent area is CACHED!!!\n");

	/* dma_handle is same as physical (shadowed) address */
	*dma_handle = (dma_addr_t)ret;
#else
	/* Allocate some common virtual space to map the new pages. */
	area = get_vm_area(size, VM_ALLOC);
	if (!area) {
		free_pages(vaddr, order);
		return NULL;
	}
	va = (unsigned long) area->addr;
	ret = (void *)va;

	/* This gives us the real physical address of the first page. */
	*dma_handle = pa = __virt_to_phys(vaddr);
#endif

	/*
	 * Free wasted pages. We skip the first page since we know
	 * that it will have count = 1 and won't require freeing.
	 * We also mark the pages in use as reserved so that
	 * remap_page_range works.
	 */
	page = virt_to_page(vaddr);
	end = page + (1 << order);

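	/*
	 * Split the high-order allocation into order-0 pages so that the
	 * unused tail pages can be refcounted and freed individually below.
	 */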
	split_page(page, order);

	for (i = 0; i < size && err == 0; i += PAGE_SIZE) {
#ifdef CONFIG_MMU
		/* MS: This is the whole magic - use cache inhibit pages */
		err = map_page(va + i, pa + i, _PAGE_KERNEL | _PAGE_NO_CACHE);
#endif

		SetPageReserved(page);
		page++;
	}

	/* Free the otherwise unused pages. */
	while (page < end) {
		__free_page(page);
		page++;
	}

	if (err) {
		free_pages(vaddr, order);
		return NULL;
	}

	return ret;
}
EXPORT_SYMBOL(consistent_alloc);

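/*
 * Usage sketch (illustrative, not taken from an in-tree caller): a driver
 * wanting a small uncached descriptor ring might do
 *
 *	dma_addr_t ring_dma;
 *	void *ring = consistent_alloc(GFP_KERNEL, PAGE_SIZE, &ring_dma);
 *
 *	if (!ring)
 *		return -ENOMEM;
 *	...
 *	consistent_free(PAGE_SIZE, ring);
 *
 * "ring" and "ring_dma" are hypothetical names; most code reaches these
 * helpers through the architecture's dma_map_ops rather than directly.
 */
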
#ifdef CONFIG_MMU
static pte_t *consistent_virt_to_pte(void *vaddr)
{
	unsigned long addr = (unsigned long)vaddr;

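	/* Walk the kernel page tables: pgd, then pmd, then pte entry. */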
	return pte_offset_kernel(pmd_offset(pgd_offset_k(addr), addr), addr);
}

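/*
 * Translate a virtual address inside a consistent mapping back to a page
 * frame number, e.g. so a DMA mmap implementation can insert the pages
 * into a userspace mapping. Returns 0 if nothing is mapped at @vaddr.
 */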
unsigned long consistent_virt_to_pfn(void *vaddr)
{
	pte_t *ptep = consistent_virt_to_pte(vaddr);

	if (pte_none(*ptep) || !pte_present(*ptep))
		return 0;

	return pte_pfn(*ptep);
}
#endif

/*
 * free page(s) as defined by the above mapping.
 */
void consistent_free(size_t size, void *vaddr)
{
	struct page *page;

	if (in_interrupt())
		BUG();

	size = PAGE_ALIGN(size);

#ifndef CONFIG_MMU
	/* Clear SHADOW_MASK bit in address, and free as per usual */
# ifdef CONFIG_XILINX_UNCACHED_SHADOW
	vaddr = (void *)((unsigned)vaddr & ~UNCACHED_SHADOW_MASK);
# endif
	page = virt_to_page(vaddr);

	do {
		__free_reserved_page(page);
		page++;
	} while (size -= PAGE_SIZE);
#else
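	/*
	 * Walk the region a page at a time: look up each page's pte,
	 * clear the mapping, and release the underlying reserved page.
	 */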
	do {
		pte_t *ptep = consistent_virt_to_pte(vaddr);
		unsigned long pfn;

		if (!pte_none(*ptep) && pte_present(*ptep)) {
			pfn = pte_pfn(*ptep);
			pte_clear(&init_mm, (unsigned int)vaddr, ptep);
			if (pfn_valid(pfn)) {
				page = pfn_to_page(pfn);
				__free_reserved_page(page);
			}
		}
		vaddr += PAGE_SIZE;
	} while (size -= PAGE_SIZE);

	/* flush tlb */
	flush_tlb_all();
#endif
}
EXPORT_SYMBOL(consistent_free);

/*
 * make an area consistent.
 */
void consistent_sync(void *vaddr, size_t size, int direction)
{
	unsigned long start;
	unsigned long end;

	start = (unsigned long)vaddr;

	/* Convert start address back down to unshadowed memory region */
#ifdef CONFIG_XILINX_UNCACHED_SHADOW
	start &= ~UNCACHED_SHADOW_MASK;
#endif
	end = start + size;

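	/*
	 * FROMDEVICE: invalidate, so the CPU re-reads what the device wrote.
	 * TODEVICE: write back, so the device sees the CPU's dirty lines.
	 * BIDIRECTIONAL: flush (write back and invalidate) covers both.
	 */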
	switch (direction) {
	case PCI_DMA_NONE:
		BUG();
	case PCI_DMA_FROMDEVICE:	/* invalidate only */
		invalidate_dcache_range(start, end);
		break;
	case PCI_DMA_TODEVICE:		/* writeback only */
		flush_dcache_range(start, end);
		break;
	case PCI_DMA_BIDIRECTIONAL:	/* writeback and invalidate */
		flush_dcache_range(start, end);
		break;
	}
}
EXPORT_SYMBOL(consistent_sync);

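/*
 * Usage sketch (illustrative only): before starting a device-to-memory
 * transfer into a cached buffer, a caller might issue
 *
 *	consistent_sync(buf, len, PCI_DMA_FROMDEVICE);
 *
 * "buf" and "len" are hypothetical; in practice this is reached via the
 * DMA API's sync hooks rather than called directly.
 */
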
/*
 * consistent_sync_page makes memory consistent. Identical to
 * consistent_sync, but takes a struct page instead of a virtual address.
 */
void consistent_sync_page(struct page *page, unsigned long offset,
			  size_t size, int direction)
{
	unsigned long start = (unsigned long)page_address(page) + offset;

	consistent_sync((void *)start, size, direction);
}
EXPORT_SYMBOL(consistent_sync_page);