/*
 *  linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
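/*
 * A minimal usage sketch (the device and its physical address are
 * hypothetical, for illustration only; MT_DEVICE is the memory type
 * normally used for device registers):
 *
 *	u32 status;
 *	void __iomem *regs;
 *
 *	regs = __arm_ioremap(0x40000000, SZ_4K, MT_DEVICE);
 *	if (regs) {
 *		writel(1, regs + 0x04);
 *		status = readl(regs + 0x08);
 *		__iounmap(regs);
 *	}
 */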
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

#include <asm/mach/map.h>
#include "mm.h"

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

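/*
 * Generic (page-at-a-time) remapping: the three helpers below walk
 * pgd -> pmd -> pte over the requested kernel virtual range, allocating
 * page tables as needed and installing one PTE per page with the
 * protection bits taken from the requested mem_type.
 */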
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, const struct mem_type *type)
{
	pgprot_t prot = __pgprot(type->prot_pte);
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot),
			    type->prot_pte_ext);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}

static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 const struct mem_type *type)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, type);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}

static int remap_area_pages(unsigned long start, unsigned long pfn,
			    size_t size, const struct mem_type *type)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, type);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}

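/*
 * Keep a task's view of the kernel's vmalloc/ioremap page tables in
 * sync with init_mm: copy the kernel pgd entries covering the vmalloc
 * region into this mm, retrying until the sequence number is stable
 * against a concurrent update.
 */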
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a guard 4K page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
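/*
 * Worked example of that masking: a single 1MB section comes back from
 * get_vm_area() with size = 1MB + 4K (guard page) = 0x101000; masking
 * with ~(SZ_1M - 1) gives 0x100000 again, so the loop below visits
 * exactly one section.
 */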
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

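/*
 * On ARM a Linux pgd entry covers 2MB and holds a pair of 1MB hardware
 * section entries, which is why the loop below fills pmd[0] and pmd[1]
 * and then advances by PGDIR_SIZE.
 */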
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

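		/*
		 * A supersection descriptor carries PA[31:24] in its top
		 * bits and the extended address bits PA[35:32] in bits
		 * [23:20]; pfn >> (32 - PAGE_SHIFT) is exactly PA >> 32.
		 * The descriptor must also be replicated in 16 consecutive
		 * 1MB entries, which the 8 iterations below (2 entries per
		 * pmd write) provide.
		 */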
		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif


/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'mtype' selects the memory type (MT_DEVICE and friends) used for this
 * mapping.  See <asm/mach/map.h> for more information.
 */
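/*
 * For example (hypothetical addresses): mapping 256 bytes of registers
 * at physical 0x10012340 with 4K pages works out as
 *	pfn    = 0x10012340 >> PAGE_SHIFT = 0x10012
 *	offset = 0x10012340 & ~PAGE_MASK  = 0x340
 * and the cookie returned below points 0x340 bytes into the new
 * page-aligned virtual area.
 */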
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings (physical addresses at or above 4GB, i.e.
	 * pfn >= 0x100000) must be supersection aligned.
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

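	/*
	 * Pick the widest mapping the alignment allows: 16MB
	 * supersections for high physical addresses (pfn >= 0x100000)
	 * when the CPU supports them (ARMv6 with extended page tables,
	 * or XSC3) and pfn, size and addr are all supersection-aligned;
	 * 1MB sections when everything is section-aligned; otherwise
	 * fall back to individual pages.  Section mappings are only
	 * usable on !SMP (see the comment above unmap_area_sections()).
	 */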
#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, type);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn(pfn, offset, size, mtype);
}
EXPORT_SYMBOL(__arm_ioremap);
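
/*
 * Note: in this era of the tree, <asm/io.h> defines the usual ioremap()
 * and ioremap_nocache() interfaces as wrappers around __arm_ioremap()
 * with an appropriate MT_DEVICE* memory type, so most drivers never
 * call the functions in this file directly.
 */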

void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast. We need the lock here b/c we need to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist ; (tmp = *p) ; p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap((void __force *)addr);
}
EXPORT_SYMBOL(__iounmap);