/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);
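
/*
 * Illustrative sketch (not part of the original file): __phys_addr()
 * folds both 64-bit kernel virtual ranges back to physical. A
 * direct-mapped address translates by subtracting PAGE_OFFSET:
 *
 *      __phys_addr(PAGE_OFFSET + 0x1000) == 0x1000
 *
 * while an address in the kernel-text mapping above __START_KERNEL_map
 * is rebased on phys_base:
 *
 *      __phys_addr(__START_KERNEL_map + 0x1000) == phys_base + 0x1000
 */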

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        unsigned long addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
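
/*
 * Illustrative use (this mirrors what __ioremap() below already does;
 * nothing new is being added to the API): a caller can refuse to remap
 * normal RAM page by page:
 *
 *      for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *              if (page_is_ram(pfn))
 *                      goto reject;    (E820 marks the page usable RAM)
 */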

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                err = _set_memory_uc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WC:
                err = _set_memory_wc(vaddr, nrpages);
                break;
        case _PAGE_CACHE_WB:
                err = _set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}
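
/*
 * Illustrative call (this is how __ioremap() below uses it): once a
 * remap has been created at kernel virtual address vaddr, the requested
 * cache attribute is applied to it, e.g. for an uncached mapping:
 *
 *      ioremap_change_attr(vaddr, size, _PAGE_CACHE_UC);
 */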

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               unsigned long prot_val)
{
        unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
        unsigned long new_prot_val;
        pgprot_t prot;
        int retval;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
                       (unsigned long long)phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                                (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        retval = reserve_memtype(phys_addr, phys_addr + size,
                                                prot_val, &new_prot_val);
        if (retval) {
                pr_debug("Warning: reserve_memtype returned %d\n", retval);
                return NULL;
        }

        if (prot_val != new_prot_val) {
                /*
                 * Do not fallback to certain memory types with certain
                 * requested type:
                 * - request is uncached, return cannot be write-back
                 * - request is uncached, return cannot be write-combine
                 * - request is write-combine, return cannot be write-back
                 */
                if ((prot_val == _PAGE_CACHE_UC &&
                     (new_prot_val == _PAGE_CACHE_WB ||
                      new_prot_val == _PAGE_CACHE_WC)) ||
                    (prot_val == _PAGE_CACHE_WC &&
                     new_prot_val == _PAGE_CACHE_WB)) {
                        pr_debug(
                "ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
                                (unsigned long long)phys_addr,
                                (unsigned long long)(phys_addr + size),
                                prot_val, new_prot_val);
                        free_memtype(phys_addr, phys_addr + size);
                        return NULL;
                }
                prot_val = new_prot_val;
        }

        switch (prot_val) {
        case _PAGE_CACHE_UC:
        default:
                prot = PAGE_KERNEL_NOCACHE;
                break;
        case _PAGE_CACHE_WC:
                prot = PAGE_KERNEL_WC;
                break;
        case _PAGE_CACHE_WB:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_memtype(phys_addr, phys_addr + size);
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
                free_memtype(phys_addr, phys_addr + size);
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}
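
/*
 * Worked example for the non-page-aligned case noted above (the values
 * are illustrative): a request for phys_addr 0xfebc1234 and size 0x100
 * becomes a one-page mapping of 0xfebc1000, and the caller gets back
 * that page's new virtual address plus the original offset 0x234, so
 * the alignment handling stays invisible to callers.
 */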

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);
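
/*
 * Typical usage (an illustrative sketch of a hypothetical PCI driver;
 * the BAR index and register offset are made up):
 *
 *      void __iomem *regs;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      writel(1, regs + 0x10);         (poke a device register)
 *      ...
 *      iounmap(regs);
 */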

/**
 * ioremap_wc   -       map memory into CPU space write combined
 * @phys_addr:  bus address of the memory
 * @size:       size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
        if (pat_wc_enabled)
                return __ioremap(phys_addr, size, _PAGE_CACHE_WC);
        else
                return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
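
/*
 * Typical usage (illustrative; a framebuffer is the classic
 * write-combining candidate, and fb_phys/fb_len are made-up names):
 *
 *      void __iomem *fb = ioremap_wc(fb_phys, fb_len);
 *
 * Streaming writes through such a mapping can be merged by the CPU's
 * write-combining buffers instead of going out as individual uncached
 * accesses. Note that without PAT this falls back to uncached above.
 */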

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);
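
/*
 * ioremap_cache() requests a write-back (cached) mapping. A plausible
 * use (illustrative, not taken from this file) is read-mostly firmware
 * data that is not device MMIO:
 *
 *      void __iomem *tbl = ioremap_cache(table_phys, table_len);
 */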

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there
         * isn't another iounmap for the same address in parallel. Reuse
         * of the virtual address is prevented by leaving it in the
         * global lists until we're done with it. cpa takes care of the
         * direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                       fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
                       fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
                       FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
               early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);
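
/*
 * The nesting counter lets a bounded number of early mappings overlap;
 * each level gets its own window of NR_FIX_BTMAPS fixmap slots. An
 * illustrative (hypothetical) balanced sequence:
 *
 *      p = early_ioremap(phys1, len1);         (nesting 0 -> 1)
 *      q = early_ioremap(phys2, len2);         (nesting 1 -> 2)
 *      ...
 *      early_iounmap(q, len2);
 *      early_iounmap(p, len1);
 *
 * Unbalanced callers are exactly what check_early_ioremap_leak()
 * complains about at late_initcall time.
 */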

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}
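
/*
 * Typical boot-time usage (illustrative sketch; table_phys/table_len
 * are made-up names). early_ioremap() serves code that runs before the
 * normal ioremap()/vmalloc machinery exists, e.g. peeking at a firmware
 * table at a known physical address:
 *
 *      void *map = early_ioremap(table_phys, table_len);
 *      if (map) {
 *              ... parse the table ...
 *              early_iounmap(map, table_len);
 *      }
 */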

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(early_ioremap_nested < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */