/*
 * linux/arch/arm/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 *
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 *
 * This allows a driver to remap an arbitrary region of bus memory into
 * virtual space. One should *only* use readl, writel, memcpy_toio and
 * so on with such remapped areas.
 *
 * Because the ARM only has a 32-bit address space we can't address the
 * whole of the (physical) PCI space at once. PCI huge-mode addressing
 * allows us to circumvent this restriction by splitting PCI space into
 * two 2GB chunks and mapping only one at a time into processor memory.
 * We use MMU protection domains to trap any attempt to access the bank
 * that is not currently mapped. (This isn't fully implemented yet.)
 */
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>
#include <linux/sizes.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/cacheflush.h>
#include <asm/early_ioremap.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/tlbflush.h>
#include <asm/system_info.h>

#include <asm/mach/map.h>
#include <asm/mach/pci.h>
#include "mm.h"

LIST_HEAD(static_vmlist);

static struct static_vm *find_static_vm_paddr(phys_addr_t paddr,
			size_t size, unsigned int mtype)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;
		if (!(vm->flags & VM_ARM_STATIC_MAPPING))
			continue;
		if ((vm->flags & VM_ARM_MTYPE_MASK) != VM_ARM_MTYPE(mtype))
			continue;

		if (vm->phys_addr > paddr ||
			paddr + size - 1 > vm->phys_addr + vm->size - 1)
			continue;

		return svm;
	}

	return NULL;
}

struct static_vm *find_static_vm_vaddr(void *vaddr)
{
	struct static_vm *svm;
	struct vm_struct *vm;

	list_for_each_entry(svm, &static_vmlist, list) {
		vm = &svm->vm;

		/* static_vmlist is sorted in ascending vaddr order */
		if (vm->addr > vaddr)
			break;

		if (vm->addr <= vaddr && vm->addr + vm->size > vaddr)
			return svm;
	}

	return NULL;
}

void __init add_static_vm_early(struct static_vm *svm)
{
	struct static_vm *curr_svm;
	struct vm_struct *vm;
	void *vaddr;

	vm = &svm->vm;
	vm_area_add_early(vm);
	vaddr = vm->addr;

	list_for_each_entry(curr_svm, &static_vmlist, list) {
		vm = &curr_svm->vm;

		if (vm->addr > vaddr)
			break;
	}
	list_add_tail(&svm->list, &curr_svm->list);
}
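
/*
 * Static mappings normally reach this list at boot via a machine's
 * ->map_io() hook: board code fills a map_desc table and iotable_init()
 * wraps each entry in a static_vm and calls add_static_vm_early().
 * Illustrative sketch only - the board name and addresses below are
 * hypothetical:
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual = 0xf8000000,
 *			.pfn     = __phys_to_pfn(0x10000000),
 *			.length  = SZ_1M,
 *			.type    = MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init board_map_io(void)
 *	{
 *		iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 *	}
 *
 * Later ioremap() calls covering that physical range can then be
 * satisfied by find_static_vm_paddr() instead of a fresh vmalloc area.
 */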

int ioremap_page(unsigned long virt, unsigned long phys,
		 const struct mem_type *mtype)
{
	return ioremap_page_range(virt, virt + PAGE_SIZE, phys,
				  __pgprot(mtype->prot_pte));
}
EXPORT_SYMBOL(ioremap_page);
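
/*
 * Unlike ioremap(), ioremap_page() installs a single page at a virtual
 * address the caller already owns (e.g. a fixed platform window). A
 * minimal sketch, assuming a hypothetical reserved window BOARD_IO_VIRT
 * and a device at 0x48000000:
 *
 *	err = ioremap_page(BOARD_IO_VIRT, 0x48000000,
 *			   get_mem_type(MT_DEVICE));
 */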

void __check_vmalloc_seq(struct mm_struct *mm)
{
	unsigned int seq;

	do {
		seq = init_mm.context.vmalloc_seq;
		memcpy(pgd_offset(mm, VMALLOC_START),
		       pgd_offset_k(VMALLOC_START),
		       sizeof(pgd_t) * (pgd_index(VMALLOC_END) -
					pgd_index(VMALLOC_START)));
		mm->context.vmalloc_seq = seq;
	} while (seq != init_mm.context.vmalloc_seq);
}
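
/*
 * Callers only take the copy above when the sequence numbers differ;
 * the fast path (see asm/mmu_context.h) is roughly:
 *
 *	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 *		__check_vmalloc_seq(mm);
 *
 * The do/while loop above re-copies if unmap_area_sections() bumps
 * init_mm's counter while the memcpy is in flight.
 */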

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
/*
 * Section support is unsafe on SMP - If you iounmap and ioremap a region,
 * the other CPUs will not see this change until their next context switch.
 * Meanwhile, (eg) if an interrupt comes in on one of those other CPUs
 * which requires the new ioremap'd region to be referenced, the CPU will
 * reference the _old_ region.
 *
 * Note that get_vm_area_caller() allocates a guard 4K page, so we need to
 * mask the size back to 1MB aligned or we will overflow in the loop below.
 */
static void unmap_area_sections(unsigned long virt, unsigned long size)
{
	unsigned long addr = virt, end = virt + (size & ~(SZ_1M - 1));
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmdp;

	flush_cache_vunmap(addr, end);
	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmdp = pmd_offset(pud, addr);
	do {
		pmd_t pmd = *pmdp;

		if (!pmd_none(pmd)) {
			/*
			 * Clear the PMD from the page table, and
			 * increment the vmalloc sequence so others
			 * notice this change.
			 *
			 * Note: this is still racy on SMP machines.
			 */
			pmd_clear(pmdp);
			init_mm.context.vmalloc_seq++;

			/*
			 * Free the page table, if there was one.
			 */
			if ((pmd_val(pmd) & PMD_TYPE_MASK) == PMD_TYPE_TABLE)
				pte_free_kernel(&init_mm, pmd_page_vaddr(pmd));
		}

		/* each Linux PMD covers two 1MB hardware sections */
		addr += PMD_SIZE;
		pmdp += 2;
	} while (addr < end);

	/*
	 * Ensure that the active_mm is up to date - we want to
	 * catch any use-after-iounmap cases.
	 */
	if (current->active_mm->context.vmalloc_seq != init_mm.context.vmalloc_seq)
		__check_vmalloc_seq(current->active_mm);

	flush_tlb_kernel_range(virt, end);
}

static int
remap_area_sections(unsigned long virt, unsigned long pfn,
		    size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(addr);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		/* write both 1MB hardware sections of this 2MB Linux PMD */
		pmd[0] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		pmd[1] = __pmd(__pfn_to_phys(pfn) | type->prot_sect);
		pfn += SZ_1M >> PAGE_SHIFT;
		flush_pmd_entry(pmd);

		addr += PMD_SIZE;
		pmd += 2;
	} while (addr < end);

	return 0;
}

static int
remap_area_supersections(unsigned long virt, unsigned long pfn,
			 size_t size, const struct mem_type *type)
{
	unsigned long addr = virt, end = virt + size;
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	/*
	 * Remove and free any PTE-based mapping, and
	 * sync the current kernel mapping.
	 */
	unmap_area_sections(virt, size);

	pgd = pgd_offset_k(virt);
	pud = pud_offset(pgd, addr);
	pmd = pmd_offset(pud, addr);
	do {
		unsigned long super_pmd_val, i;

		super_pmd_val = __pfn_to_phys(pfn) | type->prot_sect |
				PMD_SECT_SUPER;
		/* supersections carry PA[35:32] in descriptor bits [23:20] */
		super_pmd_val |= ((pfn >> (32 - PAGE_SHIFT)) & 0xf) << 20;

		/* a 16MB supersection must be repeated in all 16 entries */
		for (i = 0; i < 8; i++) {
			pmd[0] = __pmd(super_pmd_val);
			pmd[1] = __pmd(super_pmd_val);
			flush_pmd_entry(pmd);

			addr += PMD_SIZE;
			pmd += 2;
		}

		pfn += SUPERSECTION_SIZE >> PAGE_SHIFT;
	} while (addr < end);

	return 0;
}
#endif

static void __iomem * __arm_ioremap_pfn_caller(unsigned long pfn,
	unsigned long offset, size_t size, unsigned int mtype, void *caller)
{
	const struct mem_type *type;
	int err;
	unsigned long addr;
	struct vm_struct *area;
	phys_addr_t paddr = __pfn_to_phys(pfn);

#ifndef CONFIG_ARM_LPAE
	/*
	 * High mappings must be supersection aligned
	 */
	if (pfn >= 0x100000 && (paddr & ~SUPERSECTION_MASK))
		return NULL;
#endif

	type = get_mem_type(mtype);
	if (!type)
		return NULL;

	/*
	 * Page align the mapping size, taking account of any offset.
	 */
	size = PAGE_ALIGN(offset + size);

	/*
	 * Try to reuse one of the static mappings whenever possible.
	 */
	if (size && !(sizeof(phys_addr_t) == 4 && pfn >= 0x100000)) {
		struct static_vm *svm;

		svm = find_static_vm_paddr(paddr, size, mtype);
		if (svm) {
			addr = (unsigned long)svm->vm.addr;
			addr += paddr - svm->vm.phys_addr;
			return (void __iomem *) (offset + addr);
		}
	}

	/*
	 * Don't allow RAM to be mapped with mismatched attributes - this
	 * causes problems with ARMv6+
	 */
	if (WARN_ON(pfn_valid(pfn) && mtype != MT_MEMORY_RW))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = paddr;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	if (DOMAIN_IO == 0 &&
	    (((cpu_architecture() >= CPU_ARCH_ARMv6) && (get_cr() & CR_XP)) ||
	       cpu_is_xsc3()) && pfn >= 0x100000 &&
	       !((paddr | size | addr) & ~SUPERSECTION_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_supersections(addr, pfn, size, type);
	} else if (!((paddr | size | addr) & ~PMD_MASK)) {
		area->flags |= VM_ARM_SECTION_MAPPING;
		err = remap_area_sections(addr, pfn, size, type);
	} else
#endif
		err = ioremap_page_range(addr, addr + size, paddr,
					 __pgprot(type->prot_pte));

	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	flush_cache_vmap(addr, addr + size);
	return (void __iomem *) (offset + addr);
}

void __iomem *__arm_ioremap_caller(phys_addr_t phys_addr, size_t size,
	unsigned int mtype, void *caller)
{
	phys_addr_t last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	unsigned long pfn = __phys_to_pfn(phys_addr);

	/*
	 * Don't allow wraparound or zero size
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
			caller);
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *
__arm_ioremap_pfn(unsigned long pfn, unsigned long offset, size_t size,
		  unsigned int mtype)
{
	return __arm_ioremap_pfn_caller(pfn, offset, size, mtype,
					__builtin_return_address(0));
}
EXPORT_SYMBOL(__arm_ioremap_pfn);

void __iomem * (*arch_ioremap_caller)(phys_addr_t, size_t,
				      unsigned int, void *) =
	__arm_ioremap_caller;

void __iomem *ioremap(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap);

void __iomem *ioremap_cache(resource_size_t res_cookie, size_t size)
	__alias(ioremap_cached);

void __iomem *ioremap_cached(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_CACHED,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
EXPORT_SYMBOL(ioremap_cached);

void __iomem *ioremap_wc(resource_size_t res_cookie, size_t size)
{
	return arch_ioremap_caller(res_cookie, size, MT_DEVICE_WC,
				   __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_wc);
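
/*
 * Driver-side sketch (hypothetical addresses): control registers want a
 * strongly-ordered MT_DEVICE mapping via plain ioremap(), while a large
 * write-mostly aperture such as a framebuffer typically uses
 * ioremap_wc() to get write-combining:
 *
 *	void __iomem *regs = ioremap(0x4a000000, SZ_4K);
 *	void __iomem *fb   = ioremap_wc(0x60000000, SZ_8M);
 */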

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space as memory. Needed when the kernel wants to execute
 * code in external memory. This is needed for reprogramming source
 * clocks that would affect normal memory for example. Please see
 * CONFIG_GENERIC_ALLOCATOR for allocating external memory.
 */
void __iomem *
__arm_ioremap_exec(phys_addr_t phys_addr, size_t size, bool cached)
{
	unsigned int mtype;

	if (cached)
		mtype = MT_MEMORY_RWX;
	else
		mtype = MT_MEMORY_RWX_NONCACHED;

	return __arm_ioremap_caller(phys_addr, size, mtype,
			__builtin_return_address(0));
}
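
/*
 * A typical use, sketched with hypothetical symbols: map on-chip SRAM
 * executable, copy a self-contained suspend/clock routine into it with
 * fncpy(), and run it while SDRAM is unusable:
 *
 *	void __iomem *sram = __arm_ioremap_exec(SRAM_PHYS, SZ_64K, true);
 *	suspend = fncpy(sram, &board_do_suspend, board_do_suspend_sz);
 *	suspend();
 */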

void *arch_memremap_wb(phys_addr_t phys_addr, size_t size)
{
	return (__force void *)arch_ioremap_caller(phys_addr, size,
						   MT_MEMORY_RW,
						   __builtin_return_address(0));
}
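
/*
 * This backs the generic memremap() path: memremap(phys, size,
 * MEMREMAP_WB) ends up here, so ordinary RAM is remapped with
 * MT_MEMORY_RW attributes rather than as a device mapping:
 *
 *	void *va = memremap(phys, size, MEMREMAP_WB);
 */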

void __iounmap(volatile void __iomem *io_addr)
{
	void *addr = (void *)(PAGE_MASK & (unsigned long)io_addr);
	struct static_vm *svm;

	/* If this is a static mapping, we must leave it alone */
	svm = find_static_vm_vaddr(addr);
	if (svm)
		return;

#if !defined(CONFIG_SMP) && !defined(CONFIG_ARM_LPAE)
	{
		struct vm_struct *vm;

		vm = find_vm_area(addr);

		/*
		 * If this is a section based mapping we need to handle it
		 * specially as the VM subsystem does not know how to handle
		 * such a beast.
		 */
		if (vm && (vm->flags & VM_ARM_SECTION_MAPPING))
			unmap_area_sections((unsigned long)vm->addr, vm->size);
	}
#endif

	vunmap(addr);
}

void (*arch_iounmap)(volatile void __iomem *) = __iounmap;

void iounmap(volatile void __iomem *cookie)
{
	arch_iounmap(cookie);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_PCI
static int pci_ioremap_mem_type = MT_DEVICE;

void pci_ioremap_set_mem_type(int mem_type)
{
	pci_ioremap_mem_type = mem_type;
}

int pci_ioremap_io(unsigned int offset, phys_addr_t phys_addr)
{
	BUG_ON(offset + SZ_64K > IO_SPACE_LIMIT);

	return ioremap_page_range(PCI_IO_VIRT_BASE + offset,
				  PCI_IO_VIRT_BASE + offset + SZ_64K,
				  phys_addr,
				  __pgprot(get_mem_type(pci_ioremap_mem_type)->prot_pte));
}
EXPORT_SYMBOL_GPL(pci_ioremap_io);
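
/*
 * Host bridge drivers call this once per 64K I/O window so that
 * inb()/outb() port accesses hit PCI_IO_VIRT_BASE + offset. A sketch
 * for the nth window of a hypothetical bridge resource:
 *
 *	pci_ioremap_io(nr * SZ_64K, res->start);
 */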
#endif

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}
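
/*
 * Once the fixmap slots exist, boot code can temporarily map hardware
 * before the normal ioremap machinery is up, using the generic early
 * ioremap API; a hedged sketch:
 *
 *	void __iomem *base = early_ioremap(phys, size);
 *	...probe registers...
 *	early_iounmap(base, size);
 */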