/*
 * Based on arch/arm/mm/ioremap.c
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * Hacked for ARM by Phil Blundell <philb@gnu.org>
 * Hacked to allow all architectures to build, and various cleanups
 * by Russell King
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/export.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/io.h>

#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

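/*
 * Common helper for the ioremap() variants: page-align the requested
 * physical range, reserve a VM_IOREMAP area in the vmalloc space and map
 * the range into it with the given protection. Requests that wrap around,
 * fall outside PHYS_MASK or target RAM (pfn_valid pages) are rejected.
 */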
static void __iomem *__ioremap_caller(phys_addr_t phys_addr, size_t size,
				      pgprot_t prot, void *caller)
{
	unsigned long last_addr;
	unsigned long offset = phys_addr & ~PAGE_MASK;
	int err;
	unsigned long addr;
	struct vm_struct *area;

	/*
	 * Page align the mapping address and size, taking account of any
	 * offset.
	 */
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(size + offset);

	/*
	 * Don't allow wraparound, zero size or outside PHYS_MASK.
	 */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr || (last_addr & ~PHYS_MASK))
		return NULL;

	/*
	 * Don't allow RAM to be mapped.
	 */
	if (WARN_ON(pfn_valid(__phys_to_pfn(phys_addr))))
		return NULL;

	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	addr = (unsigned long)area->addr;
	area->phys_addr = phys_addr;

	err = ioremap_page_range(addr, addr + size, phys_addr, prot);
	if (err) {
		vunmap((void *)addr);
		return NULL;
	}

	return (void __iomem *)(offset + addr);
}

void __iomem *__ioremap(phys_addr_t phys_addr, size_t size, pgprot_t prot)
{
	return __ioremap_caller(phys_addr, size, prot,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(__ioremap);

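/*
 * Tear down a mapping created by __ioremap() or ioremap_cache(). Addresses
 * outside the vmalloc range (e.g. the linear-map addresses that
 * ioremap_cache() returns for RAM) are left untouched.
 */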
void __iounmap(volatile void __iomem *io_addr)
{
	unsigned long addr = (unsigned long)io_addr & PAGE_MASK;

	/*
	 * We could get an address outside vmalloc range in case
	 * of ioremap_cache() reusing a RAM mapping.
	 */
	if (VMALLOC_START <= addr && addr < VMALLOC_END)
		vunmap((void *)addr);
}
EXPORT_SYMBOL(__iounmap);

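/*
 * Cacheable variant: normal memory already has a cacheable mapping in the
 * kernel linear map, so return that instead of creating a new alias;
 * everything else goes through __ioremap_caller() with PROT_NORMAL.
 */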
void __iomem *ioremap_cache(phys_addr_t phys_addr, size_t size)
{
	/* For normal memory we already have a cacheable mapping. */
	if (pfn_valid(__phys_to_pfn(phys_addr)))
		return (void __iomem *)__phys_to_virt(phys_addr);

	return __ioremap_caller(phys_addr, size, __pgprot(PROT_NORMAL),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/*
 * Must be called after early_fixmap_init
 */
void __init early_ioremap_init(void)
{
	early_ioremap_setup();
}