/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

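/*
 * Find a static mapping whose physical range covers [paddr, paddr + size)
 * with the requested memory type, or NULL if none exists.
 */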
static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

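/*
 * Find the static mapping that contains the virtual address vaddr,
 * or NULL if vaddr does not fall inside any static mapping.
 */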
struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is sorted in ascending vaddr order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

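/*
 * Register a static mapping during early boot, inserting it so that
 * static_vmlist stays sorted by ascending virtual address.
 */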
void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}

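/*
 * Map a single page of I/O memory at virt using the page protection
 * of the supplied memory type.
 */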
int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);

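/*
 * Copy the kernel's vmalloc-area page directory entries into mm,
 * retrying until the vmalloc sequence counter is stable.
 */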
void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

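/*
 * Map [virt, virt + size) to the given physical frames using 1MB
 * section entries.  Each Linux pmd covers two hardware sections, so
 * a pair of entries is written per iteration.
 */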
static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

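/*
 * As remap_area_sections(), but using 16MB supersections.  A
 * supersection entry must be replicated across 16 consecutive
 * section slots, hence the inner loop over eight pmd pairs.
 */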
static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

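/*
 * Core remap routine: reuse a covering static mapping if one exists,
 * otherwise allocate a vmalloc area and map it using the largest
 * granularity (supersection, section or page) that the alignment of
 * the physical and virtual addresses permits.
 */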
void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped - this causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn)))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

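/*
 * Split a physical address into a pfn and page offset, rejecting
 * zero-sized or wrapping requests, then remap it.
 */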
void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

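/*
 * Hook for all ioremap() variants; defaults to __arm_ioremap_caller()
 * but may be repointed by platform code.
 */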
void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *
__arm_ioremap(phys_addr_t phys_addr, size_t size, unsigned int mtype)
{
	return arch_ioremap_caller(phys_addr, size, mtype,
		__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap);

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}

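/*
 * Undo an ioremap().  Static mappings are permanent and are left
 * untouched; section based mappings are torn down by hand first,
 * since the VM subsystem only knows about page-based mappings.
 */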
void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

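/* As with arch_ioremap_caller, platform code may repoint this hook. */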
void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void __arm_iounmap(volatile void __iomem *io_addr)
{
	arch_iounmap(io_addr);
}
EXPORT_SYMBOL(__arm_iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

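/*
 * Select the memory type used for PCI I/O space mappings; the
 * default is MT_DEVICE.
 */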
void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

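/*
 * Map one 64K window of PCI I/O space at the given offset within
 * the fixed PCI_IO_VIRT_BASE virtual region.
 */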
int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
#endif