/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64

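/*
 * Translate a kernel virtual address to a physical one: kernel text and
 * data live in the high mapping at __START_KERNEL_map (shifted by
 * phys_base when the kernel is relocated), everything else in the
 * direct mapping at PAGE_OFFSET.
 */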
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

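/*
 * Report whether a page frame number lies in a usable RAM region of the
 * BIOS-provided e820 memory map.
 */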
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		/*
		 * !!!FIXME!!! Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area. We need a sanity
		 * check here.
		 */
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;
		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);

	if (!err)
		global_flush_tlb();

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_X86_32
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr);
		     page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
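	/*
	 * E.g. a request for phys_addr 0xfed00004 with size 0x10 maps
	 * the whole page at 0xfed00000 and returns the new mapping plus
	 * the 0x4 byte offset into it.
	 */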
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

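	/*
	 * Also switch the kernel's direct mapping of this range to the
	 * new attributes, so no physical page is ever mapped with
	 * conflicting cache attributes.
	 */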
	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it. cpa takes care of the
	 * direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

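/*
 * Early-boot ioremap support. Before paging_init() has run, the normal
 * ioremap()/vmalloc machinery is unavailable, so boot code maps small
 * MMIO regions by hand through a group of fixmap slots (FIX_BTMAP_*)
 * backed by the statically allocated bm_pte page table below.
 */
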
int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
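/*
 * One statically allocated, page-aligned page table: it covers the
 * single 4MB pgd slot that contains all of the FIX_BTMAP fixmap
 * entries, using the two-level (non-PAE) layout the helpers below
 * assume.
 */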
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

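/*
 * With two-level paging, bits 31..22 of a virtual address index the
 * 1024 pgd entries (4MB each) and bits 21..12 index the 1024 ptes of
 * the page table that maps it:
 */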
static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
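		/*
		 * Carry any still-present boot-time mapping over to the
		 * permanent fixmap:
		 */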
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

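/*
 * Number of currently outstanding early_ioremap() areas. Each nesting
 * level gets its own group of NR_FIX_BTMAPS fixmap slots, up to
 * FIX_BTMAPS_NESTING levels deep.
 */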
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
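	/*
	 * Each nesting level owns NR_FIX_BTMAPS consecutive fixmap
	 * slots. Fixmap indices grow downwards in virtual address, so
	 * decrementing idx maps consecutive, ascending virtual pages.
	 */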
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	/* nesting is unsigned; check the signed counter for underflow */
	WARN_ON(early_ioremap_nested < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */