/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>

#ifdef CONFIG_X86_64

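/*
 * Note (added for illustration): the two cases below correspond to the two
 * kernel mappings on x86_64. Addresses at or above __START_KERNEL_map come
 * from the kernel text mapping, everything else from the direct mapping at
 * PAGE_OFFSET, e.g.
 *
 *	__phys_addr((unsigned long)__va(0x1000)) == 0x1000
 */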
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif

int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		/*
		 * Sanity check: Some BIOSen report areas as RAM that
		 * are not. Notably the 640->1Mb area, which is the
		 * PCI BIOS area.
		 */
		if (addr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    end < (BIOS_END >> PAGE_SHIFT))
			continue;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
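
/*
 * Usage sketch (illustrative): callers pass a page frame number, not a
 * physical address; __ioremap() below checks page_is_ram(phys >> PAGE_SHIFT)
 * for every page of a requested mapping before allowing the remap.
 */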

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long phys_addr, unsigned long size,
			       pgprot_t prot)
{
	unsigned long npages, vaddr, last_addr = phys_addr + size - 1;
	int err, level;

	/* No change for pages after the last mapping */
	if (last_addr >= (max_pfn_mapped << PAGE_SHIFT))
		return 0;

	npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	vaddr = (unsigned long) __va(phys_addr);

	/*
	 * If there is no identity map for this address,
	 * change_page_attr_addr is unnecessary
	 */
	if (!lookup_address(vaddr, &level))
		return 0;

	/*
	 * Must use an address here and not struct page because the
	 * phys addr can be in a hole between nodes and not have a
	 * memmap entry.
	 */
	err = change_page_attr_addr(vaddr, npages, prot);

	if (!err)
		global_flush_tlb();

	return err;
}
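
/*
 * Why this matters (an illustrative note, not from the original source):
 * a physical page can be reachable both through the cached kernel direct
 * mapping (__va(phys)) and through an uncached ioremap() alias. If the two
 * aliases carry different cache attributes the results are unpredictable,
 * so __ioremap() below calls this helper to bring the direct-mapping alias
 * in line with the attributes of the new mapping.
 */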

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			       unsigned long flags)
{
	void __iomem *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (offset = phys_addr >> PAGE_SHIFT; offset < max_pfn_mapped &&
	     (offset << PAGE_SHIFT) < last_addr; offset++) {
		if (page_is_ram(offset))
			return NULL;
	}

	pgprot = MAKE_GLOBAL(__PAGE_KERNEL | flags);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}

	if (ioremap_change_attr(phys_addr, size, pgprot) < 0) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *) (offset + (char __iomem *)addr);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
}
EXPORT_SYMBOL(ioremap_nocache);
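
/*
 * Typical driver usage (a minimal sketch; "dev_phys", "dev_len" and the
 * register offsets are hypothetical, not taken from this file):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(dev_phys, dev_len);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x04);
 *	status = readl(regs + 0x08);
 *	iounmap(regs);
 */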

void __iomem *ioremap_cache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, 0);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	ioremap_change_attr(p->phys_addr, p->size, PAGE_KERNEL);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
				__attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
	return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
	return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
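
/*
 * Note on the index math above (assumes the 2-level, non-PAE i386 layout
 * that the 1024-entry bm_pte[] implies): bits 31..22 of a virtual address
 * select one of 1024 pgd entries, each covering 4MB, and bits 21..12
 * select one of 1024 ptes inside bm_pte.
 */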

void __init early_ioremap_init(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_init()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = __pa(bm_pte) | _PAGE_TABLE;
	memset(bm_pte, 0, sizeof(bm_pte));
	/*
	 * The boot-ioremap range spans multiple pgds, for which
	 * we are not prepared:
	 */
	if (pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pgd %p != %p\n",
		       pgd, early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	unsigned long *pgd;

	if (early_ioremap_debug)
		printk(KERN_DEBUG "early_ioremap_clear()\n");

	pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
	*pgd = 0;
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long *pte, phys, addr;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		/* Re-establish boot fixmaps that are still mapped */
		if (*pte & _PAGE_PRESENT) {
			phys = *pte & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long *pte, addr = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		*pte = (phys & PAGE_MASK) | pgprot_val(flags);
	else
		*pte = 0;
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_DEBUG "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}
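
/*
 * Boot-time usage sketch (illustrative; "phys", "len" and "buf" are
 * hypothetical values from earlier boot code, e.g. a firmware table):
 *
 *	void *map;
 *
 *	map = early_ioremap(phys, len);
 *	if (map) {
 *		memcpy(buf, map, len);
 *		early_iounmap(map, len);
 *	}
 *
 * Mappings should be released in last-in, first-out order (the nesting
 * counter assumes this), and check_early_ioremap_leak() above warns at
 * late_initcall time if any mapping is still outstanding.
 */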

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */