/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
	     (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
	     pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable:
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
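
/*
 * Typical usage sketch (hypothetical driver code; pdev, FOO_CTRL and the
 * surrounding error handling are made up for illustration): map a BAR
 * uncached, poke a register through the mmio accessors, and tear the
 * mapping down again.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + FOO_CTRL);
 *	...
 *	iounmap(regs);
 *
 * The returned cookie must only be dereferenced through readb/readw/readl
 * and friends, never as a plain virtual address.
 */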

/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
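
/*
 * Usage sketch (hypothetical frame buffer driver; fb_phys and fb_len are
 * invented names): write-combined mappings suit large linear apertures
 * such as frame buffers, where streaming write throughput matters more
 * than strict ordering.
 *
 *	void __iomem *fb;
 *
 *	fb = ioremap_wc(fb_phys, fb_len);
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, fb_len);
 *	...
 *	iounmap(fb);
 *
 * Note that without PAT this silently degrades to an uncached mapping,
 * as the fallback above shows.
 */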

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
				     unsigned long size)
{
	unsigned long flags;
	void __iomem *ret;
	int err;

	/*
	 * - WB for WB-able memory and no other conflicting mappings
	 * - UC_MINUS for non-WB-able memory with no other conflicting mappings
	 * - Inherit from conflicting mappings otherwise
	 */
	err = reserve_memtype(phys_addr, phys_addr + size,
			      _PAGE_CACHE_WB, &flags);
	if (err < 0)
		return NULL;

	ret = __ioremap_caller(phys_addr, size, flags,
			       __builtin_return_address(0));

	free_memtype(phys_addr, phys_addr + size);
	return ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
			   unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
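
/*
 * Usage sketch: ioremap_prot() lets the caller pick the cache attribute
 * explicitly; only the _PAGE_CACHE_MASK bits of prot_val are used here.
 * A hypothetical caller wanting an UC- mapping (phys and size are
 * placeholders) would do:
 *
 *	void __iomem *p = ioremap_prot(phys, size, _PAGE_CACHE_UC_MINUS);
 */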

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_default(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
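
/*
 * Sketch of how a /dev/mem style reader might use the pair above (buf,
 * p, sz and the error handling are invented for illustration):
 *
 *	void *ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 *
 * RAM pages come back as a plain __va() pointer and unxlate is a no-op
 * for them; anything else is a transient PAGE_SIZE ioremap that must be
 * unmapped again.
 */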

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) not found slot\n",
		       (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
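
/*
 * Boot-time usage sketch (tbl_phys, struct boot_table and
 * parse_boot_table() are made-up names): the early_* variants work
 * before the normal ioremap()/vmalloc machinery is available, are
 * limited to FIX_BTMAPS_SLOTS concurrent slots of NR_FIX_BTMAPS pages
 * each, and must be torn down with early_iounmap() using the same size.
 *
 *	struct boot_table *tbl;
 *
 *	tbl = (struct boot_table *)early_memremap(tbl_phys, sizeof(*tbl));
 *	if (!tbl)
 *		return;
 *	parse_boot_table(tbl);
 *	early_iounmap((void __iomem *)tbl, sizeof(*tbl));
 */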

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
		       addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
		       addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}