/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/sizes.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include "mm.h"

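/*
 * Illustrative usage sketch (not part of the original file; the FOO_*
 * names are hypothetical): a typical driver maps its register window
 * once, uses the MMIO accessors on the returned cookie, and unmaps it
 * when done:
 *
 *	void __iomem *regs = ioremap(FOO_PHYS_BASE, SZ_4K);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + FOO_CTRL_OFFSET);
 *	status = readl(regs + FOO_STATUS_OFFSET);
 *	iounmap(regs);
 *
 * As the header comment says, only readl/writel/memcpy_toio and
 * friends should be used on such mappings, never plain dereferences.
 */
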
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

int ioremap_pages(unsigned long virt, unsigned long phys, unsigned long size,
		  const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + size, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_pages);
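
/*
 * Sketch of intended use (hypothetical FOO_* values): unlike ioremap(),
 * these helpers take a virtual address the caller already owns, e.g. a
 * fixed machine-level mapping:
 *
 *	ioremap_page(FOO_UART_VIRT, FOO_UART_PHYS,
 *		     get_mem_type(MT_DEVICE));
 *
 * ioremap_page() maps exactly one page; ioremap_pages() covers a
 * multi-page run. Both return 0 on success, per ioremap_page_range().
 */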

/*
 * Bring the vmalloc/ioremap PGD entries of @mm back in sync with
 * init_mm. init_mm.context.kvm_seq is bumped whenever a kernel
 * section mapping is torn down, so retry the copy, seqlock-style,
 * until the sequence number is stable.
 */
void __check_kvm_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.kvm_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.kvm_seq = seq;
	} while (seq != init_mm.context.kvm_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - if you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (e.g.) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the kvm sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.kvm_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		/* each Linux PMD spans two 1MB hardware section entries */
		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.kvm_seq != init_mm.context.kvm_seq)
		__check_kvm_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		/* fill both 1MB halves of the 2MB Linux PMD */
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		/* bits 35:32 of the 36-bit address go in bits 23:20 */
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/* a supersection descriptor is replicated in all 16 slots */
		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif
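
/*
 * Illustration (hypothetical addresses, non-SMP/non-LPAE build): a
 * request such as __arm_ioremap(0x48000000, SZ_2M, MT_DEVICE) is
 * 1MB-aligned in physical address and size, so it can take the
 * remap_area_sections() path above; a small or unaligned request like
 * __arm_ioremap(0x48100400, SZ_4K, MT_DEVICE) falls through to
 * ioremap_page_range() and is mapped with individual PTEs.
 */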

void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (__pfn_to_phys(pfn) & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	read_lock(&vmlist_lock);
	for (area = vmlist; area; area = area->next) {
		if (!size || (sizeof(phys_addr_t) == 4 && pfn >= 0x100000))
			break;
		if (!(area->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((area->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;
		if (__phys_to_pfn(area->phys_addr) > pfn ||
		    __pfn_to_phys(pfn) + size - 1 > area->phys_addr + area->size - 1)
			continue;
		/* we can drop the lock here as we know *area is static */
		read_unlock(&vmlist_lock);
		addr = (unsigned long)area->addr;
		addr += __pfn_to_phys(pfn) - area->phys_addr;
		return (void __iomem *) (offset + addr);
	}
	read_unlock(&vmlist_lock);

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((__pfn_to_phys(pfn) | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((__pfn_to_phys(pfn) | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, __pfn_to_phys(pfn),
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(unsigned long phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}
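
/*
 * Worked example (illustrative values): for phys_addr = 0x101f1003 and
 * size = 0x20, offset = 0x003 and pfn = 0x101f1. The pfn-based helper
 * then rounds the length up (PAGE_ALIGN(0x003 + 0x20) is one 4K page)
 * and adds the offset back into the returned cookie, so the caller
 * gets a pointer to the exact byte requested.
 */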

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(unsigned long, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *
__arm_ioremap(unsigned long phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory, for example when reprogramming source
 * clocks that would affect normal memory. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(unsigned long phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY;
	else
		mtype = MT_MEMORY_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
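
/*
 * Usage sketch (hypothetical MY_SRAM_PHYS and routine name): a platform
 * that must run a clock-reprogramming routine while SDRAM is unusable
 * might copy it into an executable mapping of on-chip SRAM:
 *
 *	void (*sram_fn)(void);
 *	void __iomem *sram;
 *
 *	sram = __arm_ioremap_exec(MY_SRAM_PHYS, SZ_4K, true);
 *	sram_fn = fncpy(sram, &my_reprogram_fn, SZ_4K);
 *	sram_fn();		now running from the new mapping
 *
 * fncpy() (see asm/fncpy.h) takes care of the cache maintenance and
 * the Thumb bit when copying code like this.
 */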

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct vm_struct *vm;

	read_lock(&vmlist_lock);
	for (vm = vmlist; vm; vm = vm->next) {
		if (vm->addr > addr)
			break;
		if (!(vm->flags & VM_IOREMAP))
			continue;
		/* If this is a static mapping we must leave it alone */
		if ((vm->flags & VM_ARM_STATIC_MAPPING) &&
		    (vm->addr <= addr) && (vm->addr + vm->size > addr)) {
			read_unlock(&vmlist_lock);
			return;
		}
#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if ((vm->addr == addr) &&
		    (vm->flags & VM_ARM_SECTION_MAPPING)) {
			unmap_area_sections((unsigned long)vm->addr, vm->size);
			break;
		}
#endif
	}
	read_unlock(&vmlist_lock);

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
	arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);