/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space.  One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once.  PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped.  (This isn't fully implemented yet.)
 */
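
/*
 * An illustrative driver-side sketch of the interfaces implemented
 * below (the physical base address, size and register offsets here are
 * hypothetical, not taken from any real device):
 *
 *	void __iomem *regs = ioremap(0x10000000, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);		use writel/readl, never plain loads
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 */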
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/io.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>

/*
 * Used by ioremap() and iounmap() code to mark (super)section-mapped
 * I/O regions in vm_struct->flags field.
 */
#define VM_ARM_SECTION_MAPPING	0x80000000

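/*
 * Populate the PTEs covering [addr, end) at the last page-table level.
 * The range must be unmapped on entry; finding a live PTE is a bug.
 */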
static int remap_area_pte(pmd_t *pmd, unsigned long addr, unsigned long end,
			  unsigned long phys_addr, pgprot_t prot)
{
	pte_t *pte;

	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;

	do {
		if (!pte_none(*pte))
			goto bad;

		set_pte_ext(pte, pfn_pte(phys_addr >> PAGE_SHIFT, prot), 0);
		phys_addr += PAGE_SIZE;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;

 bad:
	printk(KERN_CRIT "remap_area_pte: page already exists\n");
	BUG();
}

static inline int remap_area_pmd(pgd_t *pgd, unsigned long addr,
				 unsigned long end, unsigned long phys_addr,
				 pgprot_t prot)
{
	unsigned long next;
	pmd_t *pmd;
	int ret = 0;

	pmd = pmd_alloc(&init_mm, pgd, addr);
	if (!pmd)
		return -ENOMEM;

	do {
		next = pmd_addr_end(addr, end);
		ret = remap_area_pte(pmd, addr, next, phys_addr, prot);
		if (ret)
			return ret;
		phys_addr += next - addr;
	} while (pmd++, addr = next, addr != end);
	return ret;
}

static int remap_area_pages(unsigned long start, unsigned long pfn,
			    unsigned long size, unsigned long flags)
{
	unsigned long addr = start;
	unsigned long next, end = start + size;
	unsigned long phys_addr = __pfn_to_phys(pfn);
	pgprot_t prot = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | flags);
	pgd_t *pgd;
	int err = 0;

	BUG_ON(addr >= end);
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = remap_area_pmd(pgd, addr, next, phys_addr, prot);
		if (err)
			break;
		phys_addr += next - addr;
	} while (pgd++, addr = next, addr != end);

	return err;
}
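/*
 * Bring mm's view of the kernel (vmalloc) page directory entries up to
 * date with init_mm.  The copy is retried until init_mm.context.kvm_seq
 * reads the same before and after, since it may be bumped underneath us
 * (e.g. by unmap_area_sections() below) while we are copying.
 */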
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#ifndef CONFIG_SMP
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area() allocates a 4K guard page, so we need to mask
 * the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	/* Round size down to a 1MB multiple, per the comment above. */
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	do {
		pmd_t pmd, *pmdp = pmd_offset(pgd, addr);

		pmd = *pmdp;
		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(pmd_page_vaddr(pmd));
		}

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

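/*
 * Map [virt, virt + size) with 1MB section entries.  Each pgd entry
 * covers 2MB on ARM, hence the two section (pmd) entries written per
 * iteration; __ioremap_pfn() only takes this path when the physical
 * address, size and virtual address are all PMD_SIZE (2MB) aligned.
 */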
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(addr);
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);

		pmd[0] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | prot);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PGDIR_SIZE;
		pgd++;
	} while (addr < end);

	return 0;
}

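/*
 * Map [virt, virt + size) with 16MB supersection entries.  A
 * supersection descriptor must be replicated into all 16 page-table
 * slots it spans (8 pgd iterations x 2 pmd entries below), and bits
 * [35:32] of the physical address live in descriptor bits [23:20],
 * which is what the shift-and-mask on pfn computes for super_pmd_val.
 */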
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 unsigned long size, unsigned long flags)
{
	unsigned long prot, addr = virt, end = virt + size;
	pgd_t *pgd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	prot = PMD_TYPE_SECT | PMD_SECT_SUPER | PMD_SECT_AP_WRITE |
	       PMD_DOMAIN(DOMAIN_IO) |
	       (flags & (L_PTE_CACHEABLE | L_PTE_BUFFERABLE));

	/*
	 * ARMv6 and above need XN set to prevent speculative prefetches
	 * hitting IO.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6)
		prot |= PMD_SECT_XN;

	pgd = pgd_offset_k(virt);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | prot;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd_t *pmd = pmd_offset(pgd, addr);

			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PGDIR_SIZE;
			pgd++;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space.  Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 *
 * 'flags' are the extra L_PTE_ flags that you want to specify for this
 * mapping.  See include/asm-arm/proc-armv/pgtable.h for more information.
 */
void __iomem *
__ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
	      unsigned long flags)
{
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;

	size = PAGE_ALIGN(size);

	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#ifndef CONFIG_SMP
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	     cpu_is_xsc3()) &&
	    !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, flags);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, flags);
	} else
#endif
		err = remap_area_pages(addr, pfn, size, flags);

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}
EXPORT_SYMBOL(__ioremap_pfn);

void __iomem *
__ioremap(unsigned long phys_addr, size_t size, unsigned long flags)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Page align the mapping size
	 */
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	return __ioremap_pfn(pfn, offset, size, flags);
}
EXPORT_SYMBOL(__ioremap);

void __iounmap(volatile void __iomem *addr)
{
#ifndef CONFIG_SMP
	struct vm_struct **p, *tmp;
#endif
	unsigned int section_mapping = 0;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long)addr);

#ifndef CONFIG_SMP
	/*
	 * If this is a section based mapping we need to handle it
	 * specially as the VM subsystem does not know how to handle
	 * such a beast.  We need the lock here because we have to clear
	 * all the mappings before the area can be reclaimed
	 * by someone else.
	 */
	write_lock(&vmlist_lock);
	for (p = &vmlist; (tmp = *p); p = &tmp->next) {
		if ((tmp->flags & VM_IOREMAP) && (tmp->addr == addr)) {
			if (tmp->flags & VM_ARM_SECTION_MAPPING) {
				*p = tmp->next;
				unmap_area_sections((unsigned long)tmp->addr,
						    tmp->size);
				kfree(tmp);
				section_mapping = 1;
			}
			break;
		}
	}
	write_unlock(&vmlist_lock);
#endif

	if (!section_mapping)
		vunmap((void __force *)addr);
}
EXPORT_SYMBOL(__iounmap);