/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
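/*
 * Illustrative usage from a driver, a minimal sketch only.  The names
 * FOO_PHYS_BASE and FOO_CTRL below are hypothetical and not defined
 * anywhere in the tree:
 *
 *	void __iomem *regs = __arm_ioremap(FOO_PHYS_BASE, SZ_4K, MT_DEVICE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + FOO_CTRL);
 *	__iounmap(regs);
 *
 * Only the MMIO accessors (readl, writel, memcpy_toio and friends) may
 * be used on the returned cookie; it must not be dereferenced directly.
 */
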
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in the vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

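/*
 * Descriptive note: the vmalloc/ioremap part of the kernel page tables
 * can change at runtime, and init_mm.context.kvm_seq is incremented on
 * every such change.  This helper copies the vmalloc-range pgd entries
 * from init_mm into @mm, retrying until the sequence number is stable,
 * so that @mm does not retain stale kernel mappings.  Callers invoke it
 * when they observe a sequence mismatch (see unmap_area_sections below).
 */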
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a 4K guard page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

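/*
 * Descriptive note: on ARM a pgd entry covers 2MB and is implemented as
 * a pair of 1MB hardware section entries, so each iteration below writes
 * two section descriptors (pmd[0] and pmd[1]) before stepping the
 * address by PGDIR_SIZE.
 */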
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

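/*
 * Descriptive note: supersections are 16MB mappings available on ARMv6+
 * and XSC3.  The architecture requires the descriptor to be replicated
 * in all 16 consecutive 1MB entries it spans (eight pgd slots of two
 * pmds each, hence the inner loop below), and bits [35:32] of the
 * physical address are carried in bits [23:20] of the descriptor.
 */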
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

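/*
 * Descriptive note: this is the common back end for the ioremap
 * variants below.  It validates the request, allocates a vm area, and
 * picks the largest mapping granule the request allows: 16MB
 * supersections where the CPU supports them, 1MB sections where the
 * physical address, size and virtual address are all section-aligned,
 * and ordinary 4K pages otherwise.  (Super)sections are only used on
 * !SMP builds, for the reasons given above unmap_area_sections().
 */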
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return __arm_ioremap_caller(phys_addr, size, mtype,
				    __builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

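/*
 * Note that io_addr may carry the sub-page offset added by
 * __arm_ioremap_pfn_caller(), so mask it back to the page-aligned
 * cookie that the vmalloc layer knows about before looking it up
 * or passing it to vunmap().
 */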
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;

	/*
	 * If this is a section-based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast.  We need the lock here because we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	vunmap(addr);
}
EXPORT_SYMBOL(__iounmap);