/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

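/*
 * Caching mode requested by the callers of __ioremap(): UNCACHED maps
 * with PAGE_KERNEL_NOCACHE, CACHED with the normal cacheable PAGE_KERNEL.
 */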
enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

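/*
 * Translate a kernel virtual address to a physical one: addresses at or
 * above __START_KERNEL_map live in the kernel text mapping, everything
 * else in the direct mapping at PAGE_OFFSET.
 */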
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

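/*
 * Returns 1 if the page frame is covered by an E820_RAM entry, 0
 * otherwise. Page 0 and the 640KB-1MB BIOS hole are never treated
 * as RAM, whatever the E820 table claims.
 */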
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory:
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS
	 * area (640KB->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
		if (page_is_ram(pfn) && pfn_valid(pfn) &&
		    !PageReserved(pfn_to_page(pfn)))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		remove_vm_area((void *)(vaddr & PAGE_MASK));
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other MMIO helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);

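/*
 * Typical use of the function above (a sketch only; the MYDEV_* names
 * are hypothetical, standing in for a driver's real resource values):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(MYDEV_BAR_PHYS, MYDEV_BAR_LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MYDEV_CTRL_OFFSET);
 *	...
 *	iounmap(regs);
 *
 * ioremap_cache() below is identical except that it establishes the
 * mapping with the cacheable PAGE_KERNEL attributes.
 */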
void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller
	 * ensures there isn't another iounmap for the same address
	 * in parallel. Reuse of the virtual address is prevented by
	 * leaving it in the global lists until we're done with it.
	 * cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

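/*
 * Boot-time fixmap support: until paging_init() has set up the real
 * kernel page tables, the FIX_BTMAP slots are backed by the single
 * statically allocated page table bm_pte below; after_paging_init
 * flags the switch-over to the regular set_fixmap()/clear_fixmap().
 */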
static __initdata int after_paging_init;
static __initdata pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__attribute__((aligned(PAGE_SIZE)));

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

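/*
 * Hook bm_pte into the pmd that covers the FIX_BTMAP range, so that
 * early_ioremap() works before the real page tables exist. The whole
 * range must fit within a single pmd; warn loudly if it does not.
 */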
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

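/*
 * Unhook bm_pte again and hand the page back to the paravirt page
 * table accounting, once the boot-time mappings are no longer needed.
 */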
void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

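/*
 * Called after paging_init(): replay any boot-time mapping that is
 * still present in bm_pte into the real page tables via set_fixmap(),
 * so that it survives the switch-over.
 */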
void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

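/*
 * Install (flags set) or clear (flags zero) a single boot-time fixmap
 * pte and flush the corresponding TLB entry.
 */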
static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

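/*
 * Number of currently active early_ioremap() mappings. Anything still
 * nonzero at late_initcall time is a leak and is reported below.
 */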
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "Please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

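/*
 * Boot-time ioremap: maps the area into the FIX_BTMAP fixmap slots,
 * NR_FIX_BTMAPS pages per mapping and at most FIX_BTMAPS_NESTING
 * mappings at a time. Must be undone with early_iounmap().
 */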
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

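/*
 * Tear down a mapping set up by early_ioremap(): clear the fixmap
 * slots that back it, one page at a time.
 */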
void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */