/*
 * Copyright (C) 2010 Tobias Klauser <tklauser@distanz.ch>
 * Copyright (C) 2009 Wind River Systems Inc
 * Implemented by fredrik.markstrom@gmail.com and ivarholmqvist@gmail.com
 * Copyright (C) 2004 Microtronix Datacom Ltd.
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */

#include <linux/export.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

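/*
 * Fill one PTE page: map the virtual range [address, address + size)
 * within a single PMD entry to the physical range starting at
 * phys_addr, marking each page global, present, readable and writable
 * plus the caller-supplied flags.
 */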
static inline void remap_area_pte(pte_t *pte, unsigned long address,
				  unsigned long size, unsigned long phys_addr,
				  unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;
	pgprot_t pgprot = __pgprot(_PAGE_GLOBAL | _PAGE_PRESENT | _PAGE_READ
				   | _PAGE_WRITE | flags);

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = PFN_DOWN(phys_addr);
	do {
		if (!pte_none(*pte)) {
			pr_err("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, pgprot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

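/*
 * Walk the PMD entries covering [address, address + size) within one
 * PGD entry, allocating PTE pages as needed and filling them via
 * remap_area_pte().  Returns 0 on success or -ENOMEM if a PTE page
 * could not be allocated.
 */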
static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				 unsigned long size, unsigned long phys_addr,
				 unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);

		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr,
			       flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

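/*
 * Top-level walk: build kernel page table entries mapping the virtual
 * range [address, address + size) to the physical range starting at
 * phys_addr, then flush the TLB so no stale translations survive.
 */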
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *dir;
	unsigned long end = address + size;

	phys_addr -= address;
	dir = pgd_offset(&init_mm, address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pmd_t *pmd;

		error = -ENOMEM;
		pud = pud_alloc(&init_mm, dir, address);
		if (!pud)
			break;
		pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			break;
		if (remap_area_pmd(pmd, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		dir++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

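/*
 * Physical addresses in the low 512 MiB (below 0x20000000) can be
 * reached uncached through the fixed I/O window at
 * CONFIG_NIOS2_IO_REGION_BASE, so no page table entries are needed
 * for them; see the fast path in __ioremap() below.
 */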
#define IS_MAPPABLE_UNCACHEABLE(addr) ((addr) < 0x20000000UL)

/*
 * Map some physical address range into the kernel address space.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long cacheflag)
{
	struct vm_struct *area;
	unsigned long offset;
	unsigned long last_addr;
	void *addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;

	if (!size || last_addr < phys_addr)
		return NULL;

	/* Don't allow anybody to remap normal RAM that we're using */
	if (phys_addr > PHYS_OFFSET && phys_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);
		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}

	/*
	 * Map uncached objects in the low part of address space to
	 * CONFIG_NIOS2_IO_REGION_BASE
	 */
	if (IS_MAPPABLE_UNCACHEABLE(phys_addr) &&
	    IS_MAPPABLE_UNCACHEABLE(last_addr) &&
	    !(cacheflag & _PAGE_CACHED))
		return (void __iomem *)(CONFIG_NIOS2_IO_REGION_BASE + phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/* Ok, go for it */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size,
			     cacheflag)) {
		vunmap(addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

/*
 * __iounmap unmaps nearly everything, so be careful.
 * It no longer frees the underlying page tables; that behaviour
 * wasn't used anyway and might be added back later.
 */
void __iounmap(void __iomem *addr)
{
	struct vm_struct *p;

	if ((unsigned long) addr > CONFIG_NIOS2_IO_REGION_BASE)
		return;

	p = remove_vm_area((void *) (PAGE_MASK & (unsigned long __force) addr));
	if (!p)
		pr_err("iounmap: bad address %p\n", addr);
	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
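
/*
 * Hedged usage sketch (illustrative only, not part of this file):
 * drivers normally reach this code through the generic ioremap() and
 * iounmap() wrappers.  EXAMPLE_DEV_BASE and EXAMPLE_DEV_SIZE below are
 * hypothetical values, and the whole block is compiled out.
 */
#if 0
#define EXAMPLE_DEV_BASE	0x18000000UL	/* hypothetical device */
#define EXAMPLE_DEV_SIZE	0x1000UL

static int example_ioremap_usage(void)
{
	/*
	 * cacheflag 0 (no _PAGE_CACHED) requests an uncached mapping;
	 * this physical range is below 0x20000000, so __ioremap() takes
	 * the fixed I/O-window fast path and builds no page tables.
	 */
	void __iomem *regs = __ioremap(EXAMPLE_DEV_BASE, EXAMPLE_DEV_SIZE, 0);

	if (!regs)
		return -ENOMEM;

	writel(0x1, regs);	/* poke a hypothetical control register */

	__iounmap(regs);	/* a no-op for I/O-window addresses */
	return 0;
}
#endif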