/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
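
/*
 * A worked example of the arithmetic above (a sketch, not kernel
 * documentation; the numeric values are illustrative assumptions):
 * with phys_base == 0 and __START_KERNEL_map == 0xffffffff80000000UL,
 * a kernel-text address resolves via the kernel mapping,
 *
 *	__phys_addr(0xffffffff80001000UL) == 0x1000
 *
 * while an address in the direct mapping resolves via PAGE_OFFSET:
 *
 *	__phys_addr(PAGE_OFFSET + 0x2000) == 0x2000
 */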

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory:
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS
	 * area (640KB-1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
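
/*
 * page_is_ram() takes a page frame number, not an address, so callers
 * shift first (as xlate_dev_mem_ptr() below does); a minimal sketch
 * with an illustrative physical address:
 *
 *	if (page_is_ram(phys_addr >> PAGE_SHIFT))
 *		addr = __va(phys_addr);
 */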

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, phys_addr + size,
				 prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested type:
		 * - request is uncached, return cannot be write-back
		 * - request is uncached, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}
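
/*
 * Worked example of the alignment handling above (a sketch with
 * illustrative values, assuming a 4KB page size): for a request of
 * 8 bytes at physical address 0xfebc1004,
 *
 *	last_addr = 0xfebc1004 + 8 - 1		  = 0xfebc100b
 *	offset	  = 0xfebc1004 & ~PAGE_MASK	  = 0x4
 *	phys_addr = 0xfebc1004 &  PAGE_MASK	  = 0xfebc1000
 *	size	  = PAGE_ALIGN(0xfebc100b + 1) - 0xfebc1000 = 0x1000
 *
 * so one full page is mapped and the caller gets back the mapped base
 * plus the 4-byte offset.
 */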

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_UC,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
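
/*
 * Minimal usage sketch for ioremap_nocache() paired with iounmap()
 * (hypothetical driver code, not part of this file; "pdev", BAR 0 and
 * the register offsets are illustrative assumptions). The mapping is
 * accessed only through the mmio helpers named above:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	status = readl(regs + STATUS_REG);
 *	iounmap(regs);
 */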

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_wc_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
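
/*
 * Usage sketch for ioremap_wc() (hypothetical framebuffer driver, not
 * part of this file; fb_phys and fb_len are illustrative). Note the
 * fallback above: without PAT write-combining support, the caller
 * transparently gets an uncached mapping instead:
 *
 *	void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, fb_len);
 *	iounmap(fb);
 */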

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller
	 * ensures there isn't another iounmap for the same address
	 * in parallel. Reuse of the virtual address is prevented by
	 * leaving it in the global lists until we're done with it.
	 * cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access.
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void *)ioremap(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
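
/*
 * Usage sketch for the xlate pair (this mirrors how a /dev/mem read
 * path is expected to use it; "buf" and "count" are illustrative):
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, count))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(phys, ptr);
 *
 * Every successful xlate must be balanced by an unxlate so that the
 * temporary ioremap of non-RAM pages is torn down again.
 */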

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;

	/*
	 * Mappings have to be page-aligned
	 * (the +1 matches __ioremap_caller above, so a request that
	 * ends exactly on a page boundary still maps its last page)
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	/* nesting is unsigned, so test the signed counter itself: */
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
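
/*
 * Boot-time usage sketch for the early_ioremap()/early_iounmap() pair
 * (hypothetical early-boot code, not part of this file; BIOS_TABLE_PHYS
 * and parse_bios_table() are illustrative assumptions). Only valid
 * while system_state == SYSTEM_BOOTING, and nested mappings must be
 * torn down in LIFO order because of the nesting scheme above:
 *
 *	struct bios_table *tbl;
 *
 *	tbl = early_ioremap(BIOS_TABLE_PHYS, sizeof(*tbl));
 *	if (tbl) {
 *		parse_bios_table(tbl);
 *		early_iounmap(tbl, sizeof(*tbl));
 *	}
 */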

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */