/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

#ifdef CONFIG_X86_64

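/*
 * Translate a kernel virtual address back to a physical address: addresses
 * at or above __START_KERNEL_map come from the kernel text mapping,
 * everything else is assumed to be in the direct (PAGE_OFFSET) mapping.
 */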
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

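/*
 * Reject physical addresses the CPU cannot actually address: anything at or
 * above 2^x86_phys_bits (the physical address width reported by CPUID) is
 * invalid. The 32-bit stub below accepts everything.
 */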
static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

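/*
 * page_is_ram - check whether a page frame is usable system RAM according
 * to the E820 map. The first page and the legacy BIOS area (640k-1Mb) are
 * never treated as RAM, regardless of what the BIOS reports.
 */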
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4Kb of memory;
	 * This is a BIOS owned area, not kernel ram, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: Some BIOSen report the PC BIOS
	 * area (640->1Mb) as ram even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %lx\n",
		       phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < last_addr; pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

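	/*
	 * Select the kernel page protection that matches the requested
	 * caching attribute.
	 */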
	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_CACHE_UC);
}
EXPORT_SYMBOL(ioremap_nocache);

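/*
 * ioremap_cache - like ioremap_nocache, but the mapping is created with
 * ordinary write-back caching (_PAGE_CACHE_WB). Must be freed with iounmap.
 */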
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_CACHE_WB);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

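/*
 * Boot-time ioremap state: after_paging_init flips to 1 once the real kernel
 * page tables are usable, and bm_pte is the statically allocated pte page
 * that backs the FIX_BTMAP fixmap slots until then.
 */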
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

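/*
 * Hook bm_pte into the pmd that covers the FIX_BTMAP fixmap slots so that
 * early_ioremap() works before the real kernel page tables are set up.
 */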
void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));
		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

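/*
 * Called once the real page tables are in place: replay any still-present
 * boot-time mappings through set_fixmap() and switch early_set_fixmap() /
 * early_clear_fixmap() over to the normal fixmap path.
 */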
void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

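/*
 * Number of early_ioremap() mappings currently outstanding; each level of
 * nesting gets its own window of NR_FIX_BTMAPS fixmap slots.
 */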
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

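/*
 * Map a physical range into the boot-time fixmap window. Only valid while
 * system_state == SYSTEM_BOOTING, limited to NR_FIX_BTMAPS pages per mapping
 * and FIX_BTMAPS_NESTING concurrent mappings; release with early_iounmap().
 */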
void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	unsigned int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */