/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
        IOR_MODE_UNCACHED,
        IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

unsigned long __phys_addr(unsigned long x)
{
        if (x >= __START_KERNEL_map)
                return x - __START_KERNEL_map + phys_base;
        return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
        return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
        return 1;
}

#endif

int page_is_ram(unsigned long pagenr)
{
        unsigned long addr, end;
        int i;

        /*
         * A special case is the first 4Kb of memory;
         * This is a BIOS owned area, not kernel ram, but generally
         * not listed as such in the E820 table.
         */
        if (pagenr == 0)
                return 0;

        /*
         * Second special case: Some BIOSen report the PC BIOS
         * area (640->1Mb) as ram even though it is not.
         */
        if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
                    pagenr < (BIOS_END >> PAGE_SHIFT))
                return 0;

        for (i = 0; i < e820.nr_map; i++) {
                /*
                 * Not usable memory:
                 */
                if (e820.map[i].type != E820_RAM)
                        continue;
                addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
                end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

                if ((pagenr >= addr) && (pagenr < end))
                        return 1;
        }
        return 0;
}
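
/*
 * Worked example of the pfn rounding above (the E820 entry values are
 * illustrative, not taken from real firmware): for an E820_RAM entry
 * with addr = 0x100c00 and size = 0x7ff000, the range ends at 0x8ffc00.
 * The start pfn rounds up to 0x101 and the end pfn rounds down to 0x8ff,
 * so only pfns 0x101..0x8fe are reported as RAM; the pages at pfn 0x100
 * and 0x8ff, which the entry covers only partially, are treated as not
 * usable.
 */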

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
                               enum ioremap_mode mode)
{
        unsigned long nrpages = size >> PAGE_SHIFT;
        int err;

        switch (mode) {
        case IOR_MODE_UNCACHED:
        default:
                err = set_memory_uc(vaddr, nrpages);
                break;
        case IOR_MODE_CACHED:
                err = set_memory_wb(vaddr, nrpages);
                break;
        }

        return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
                               enum ioremap_mode mode)
{
        unsigned long pfn, offset, last_addr, vaddr;
        struct vm_struct *area;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        if (!phys_addr_valid(phys_addr)) {
                printk(KERN_WARNING "ioremap: invalid physical address %lx\n",
                       phys_addr);
                WARN_ON_ONCE(1);
                return NULL;
        }

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (__force void __iomem *)phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        for (pfn = phys_addr >> PAGE_SHIFT;
                        (pfn << PAGE_SHIFT) < last_addr; pfn++) {

                int is_ram = page_is_ram(pfn);

                if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
                        return NULL;
                WARN_ON_ONCE(is_ram);
        }

        switch (mode) {
        case IOR_MODE_UNCACHED:
        default:
                /*
                 * FIXME: we will use UC MINUS for now, as video fb drivers
                 * depend on it. Upcoming ioremap_wc() will fix this behavior.
                 */
                prot = PAGE_KERNEL_UC_MINUS;
                break;
        case IOR_MODE_CACHED:
                prot = PAGE_KERNEL;
                break;
        }

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr+1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP);
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        vaddr = (unsigned long) area->addr;
        if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
                free_vm_area(area);
                return NULL;
        }

        if (ioremap_change_attr(vaddr, size, mode) < 0) {
                vunmap(area->addr);
                return NULL;
        }

        return (void __iomem *) (vaddr + offset);
}

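/*
 * Worked example of the alignment handling above (the physical address
 * and size are illustrative): for a request of 0x200 bytes at
 * phys_addr = 0xfebc0c00, last_addr is 0xfebc0dff, offset becomes 0xc00,
 * phys_addr is rounded down to 0xfebc0000 and size is rounded up to one
 * page (0x1000). The whole page is mapped, and the pointer handed back
 * to the caller is vaddr + 0xc00, so the caller never sees the
 * page-alignment detail mentioned in the NOTE above.
 */
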
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
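
/*
 * Typical driver usage (an illustrative sketch only, not part of this
 * file; the pci_dev pointer, BAR index and register offsets are
 * hypothetical):
 *
 *      void __iomem *regs;
 *      u32 status;
 *
 *      regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *                             pci_resource_len(pdev, 0));
 *      if (!regs)
 *              return -ENOMEM;
 *      status = readl(regs + 0x10);
 *      writel(1, regs + 0x14);
 *      iounmap(regs);
 */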

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
        return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)
                (PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk(KERN_ERR "iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
        early_ioremap_debug = 1;

        return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
                __section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
        /* Don't assume we're using swapper_pg_dir at this point */
        pgd_t *base = __va(read_cr3());
        pgd_t *pgd = &base[pgd_index(addr)];
        pud_t *pud = pud_offset(pgd, addr);
        pmd_t *pmd = pmd_offset(pud, addr);

        return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
        return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_init()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        memset(bm_pte, 0, sizeof(bm_pte));
        pmd_populate_kernel(&init_mm, pmd, bm_pte);

        /*
         * The boot-ioremap range spans multiple pmds, for which
         * we are not prepared:
         */
        if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
                WARN_ON(1);
                printk(KERN_WARNING "pmd %p != %p\n",
                       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
                        fix_to_virt(FIX_BTMAP_BEGIN));
                printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END): %08lx\n",
                        fix_to_virt(FIX_BTMAP_END));

                printk(KERN_WARNING "FIX_BTMAP_END: %d\n", FIX_BTMAP_END);
                printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
                        FIX_BTMAP_BEGIN);
        }
}

void __init early_ioremap_clear(void)
{
        pmd_t *pmd;

        if (early_ioremap_debug)
                printk(KERN_INFO "early_ioremap_clear()\n");

        pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
        pmd_clear(pmd);
        paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long addr, phys;
        pte_t *pte;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (pte_present(*pte)) {
                        phys = pte_val(*pte) & PAGE_MASK;
                        set_fixmap(idx, phys);
                }
        }
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long addr = __fix_to_virt(idx);
        pte_t *pte;

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
        else
                pte_clear(NULL, addr, pte);
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
        if (!early_ioremap_nested)
                return 0;

        printk(KERN_WARNING
               "Debug warning: early ioremap leak of %d areas detected.\n",
                early_ioremap_nested);
        printk(KERN_WARNING
               "please boot with early_ioremap_debug and report the dmesg.\n");
        WARN_ON(1);

        return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;
        if (early_ioremap_debug) {
                printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
                       phys_addr, size, nesting);
                dump_stack();
        }

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;
        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }
        if (early_ioremap_debug)
                printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

        return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        unsigned int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(early_ioremap_nested < 0);

        if (early_ioremap_debug) {
                printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
                       size, nesting);
                dump_stack();
        }

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}
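
/*
 * Typical boot-time usage (an illustrative sketch only, not part of this
 * file; the physical address and length are hypothetical): a caller that
 * needs to look at firmware data before the normal ioremap() machinery is
 * available maps it through the FIX_BTMAP fixmap slots and drops the
 * mapping again right away. Mappings must be released in reverse order,
 * since each nesting level owns its own slice of the slots.
 *
 *      char *p = early_ioremap(0xf0000, 0x10000);
 *      if (p) {
 *              ... scan the mapped area ...
 *              early_iounmap(p, 0x10000);
 *      }
 */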

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}

#endif /* CONFIG_X86_32 */