/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        resource_size_t addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
            pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}

int pagerange_is_ram(unsigned long start, unsigned long end)
{
        int ram_page = 0, not_rampage = 0;
        unsigned long page_nr;

        for (page_nr = (start >> PAGE_SHIFT); page_nr < (end >> PAGE_SHIFT);
             ++page_nr) {
                if (page_is_ram(page_nr))
                        ram_page = 1;
                else
                        not_rampage = 1;

                if (ram_page == not_rampage)
                        return -1;
        }

        return ram_page;
}
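
/*
 * Illustrative sketch (not part of this file's build): how a caller might
 * interpret pagerange_is_ram(). The physical range below is a made-up
 * example value.
 */
#if 0
static void pagerange_is_ram_example(void)
{
        /* Hypothetical physical range: 1MB..2MB */
        int ret = pagerange_is_ram(0x100000, 0x200000);

        if (ret == 1)
                pr_debug("range is entirely RAM\n");
        else if (ret == 0)
                pr_debug("range contains no RAM\n");
        else
                pr_debug("range mixes RAM and non-RAM pages\n");
}
#endif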

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                        unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
                unsigned long size, unsigned long prot_val, void *caller)
{
        unsigned long pfn, offset, vaddr;
        resource_size_t last_addr;
        const resource_size_t unaligned_phys_addr = phys_addr;
        const unsigned long unaligned_size = size;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;
        void __iomem *ret_addr;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (is_ISA_range(phys_addr, last_addr))
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
             (pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
             pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
                                 prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
                 * - request is uc-, return cannot be write-back
                 * - request is uc-, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC_MINUS &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_UC_MINUS:
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area_caller(size, VM_IOREMAP, caller);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        ret_addr = (void __iomem *) (vaddr + offset);
        mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

        return ret_addr;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        /*
         * Ideally, this should be:
         *      pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
         *
         * Till we fix all X drivers to use ioremap_wc(), we will use
         * UC MINUS.
         */
        unsigned long val = _PAGE_CACHE_UC_MINUS;

        return __ioremap_caller(phys_addr, size, val,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
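
/*
 * Illustrative sketch (not part of this file's build): the usual driver
 * pattern for ioremap_nocache()/iounmap(). The MMIO base address, window
 * size and register offset below are hypothetical placeholder values.
 */
#if 0
static int example_read_device_register(void)
{
        void __iomem *regs;
        u32 val;

        /* 0xfed00000/4K is a made-up MMIO window */
        regs = ioremap_nocache(0xfed00000, 0x1000);
        if (!regs)
                return -ENOMEM;

        val = readl(regs + 0x10);       /* hypothetical register at offset 0x10 */
        writel(val | 0x1, regs + 0x10);

        iounmap(regs);
        return 0;
}
#endif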

/**
 * ioremap_wc - map memory into CPU space write combined
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_enabled)
                return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
                                        __builtin_return_address(0));
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
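
/*
 * Illustrative sketch (not part of this file's build): mapping a
 * framebuffer-style aperture write-combined. The base and size are
 * hypothetical; on non-PAT systems ioremap_wc() silently falls back to an
 * uncached mapping, as implemented above.
 */
#if 0
static void __iomem *example_map_framebuffer(unsigned long fb_base,
                                             unsigned long fb_size)
{
        return ioremap_wc(fb_base, fb_size);    /* free with iounmap() */
}
#endif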

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

static void __iomem *ioremap_default(resource_size_t phys_addr,
                                     unsigned long size)
{
        unsigned long flags;
        void *ret;
        int err;

        /*
         * - WB for WB-able memory and no other conflicting mappings
         * - UC_MINUS for non-WB-able memory with no other conflicting mappings
         * - Inherit from conflicting mappings otherwise
         */
        err = reserve_memtype(phys_addr, phys_addr + size, -1, &flags);
        if (err < 0)
                return NULL;

        ret = (void *) __ioremap_caller(phys_addr, size, flags,
                                        __builtin_return_address(0));

        free_memtype(phys_addr, phys_addr + size);
        return (void __iomem *)ret;
}

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
                           unsigned long prot_val)
{
        return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
                                __builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
            (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        mmiotrace_iounmap(addr);

        /* Use the vm area unlocked, assuming the caller
           ensures there isn't another iounmap for the same address
           in parallel. Reuse of the virtual address is prevented by
           leaving it in the global lists until we're done with it.
           cpa takes care of the direct mappings. */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == (void __force *)addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void __force *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
        void *addr;
        unsigned long start = phys & PAGE_MASK;

        /* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
        if (page_is_ram(start >> PAGE_SHIFT))
                return __va(phys);

        addr = (void __force *)ioremap_default(start, PAGE_SIZE);
        if (addr)
                addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

        return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
        if (page_is_ram(phys >> PAGE_SHIFT))
                return;

        iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
        return;
}
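
/*
 * Illustrative sketch (not part of this file's build): how /dev/mem style
 * code pairs xlate_dev_mem_ptr()/unxlate_dev_mem_ptr() around a copy.
 * 'buf', 'phys' and 'count' are hypothetical, and count is assumed not to
 * cross a page boundary.
 */
#if 0
static long example_read_phys(void *buf, unsigned long phys, size_t count)
{
        void *ptr = xlate_dev_mem_ptr(phys);

        if (!ptr)
                return -EFAULT;

        memcpy(buf, ptr, count);        /* read through the temporary mapping */
        unxlate_dev_mem_ptr(phys, ptr);
        return count;
}
#endif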

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);

        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(&init_mm, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}


int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;
        WARN(1, KERN_WARNING
             "Debug warning: early ioremap leak of %d areas detected.\n",
             early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        if (WARN_ON(nesting < 0))
                return;

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
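
/*
 * Illustrative sketch (not part of this file's build): boot-time use of
 * early_ioremap()/early_iounmap() before the normal ioremap machinery is
 * available, e.g. to peek at a firmware table. The table address is a
 * hypothetical placeholder passed in by the caller.
 */
#if 0
static void __init example_probe_firmware_table(unsigned long table_phys)
{
        void *map;
        u32 signature;

        map = early_ioremap(table_phys, sizeof(signature));
        if (!map)
                return;

        memcpy(&signature, map, sizeof(signature));
        early_iounmap(map, sizeof(signature));

        pr_info("table signature: %08x\n", signature);
}
#endif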

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}