/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>

enum ioremap_mode {
	IOR_MODE_UNCACHED,
	IOR_MODE_CACHED,
};

#ifdef CONFIG_X86_64

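/*
 * Translate a kernel virtual address back into a physical address.
 * On 64-bit there are two linear ranges to undo: the kernel text
 * mapping starting at __START_KERNEL_map and the direct mapping of
 * all physical memory at PAGE_OFFSET.
 */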
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

#endif

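/*
 * Return 1 if the given page frame number is usable RAM according to
 * the E820 map, 0 for the BIOS-owned areas below 1MB and for anything
 * not listed as E820_RAM.
 */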
int page_is_ram(unsigned long pagenr)
{
	unsigned long addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory: it is a BIOS-owned
	 * area, not kernel RAM, but it is generally not listed as such
	 * in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSes report the PC BIOS area
	 * (640KB->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
	    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		err = set_memory_uc(vaddr, nrpages);
		break;
	case IOR_MODE_CACHED:
		err = set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap(resource_size_t phys_addr, unsigned long size,
			       enum ioremap_mode mode)
{
	unsigned long pfn, offset, last_addr, vaddr;
	struct vm_struct *area;
	pgprot_t prot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT; pfn < max_pfn_mapped &&
	     (pfn << PAGE_SHIFT) < last_addr; pfn++) {
		if (page_is_ram(pfn) && pfn_valid(pfn) &&
		    !PageReserved(pfn_to_page(pfn)))
			return NULL;
	}

	switch (mode) {
	case IOR_MODE_UNCACHED:
	default:
		/*
		 * FIXME: we will use UC MINUS for now, as video fb drivers
		 * depend on it. Upcoming ioremap_wc() will fix this behavior.
		 */
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case IOR_MODE_CACHED:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, mode) < 0) {
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses, so driver authors should read up on PCI write posting in
 * particular.
 *
 * It is useful when control registers are in such an area and write
 * combining or read caching is not desirable.
 *
 * The mapping must be freed with iounmap().
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_UNCACHED);
}
EXPORT_SYMBOL(ioremap_nocache);
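
/*
 * Illustrative usage sketch (not part of the original file): a driver
 * typically maps its device's MMIO region once, accesses it only
 * through the mmio helpers, and releases it with iounmap(). The base
 * and size below are hypothetical placeholders:
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(EXAMPLE_MMIO_PHYS, EXAMPLE_MMIO_SIZE);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs);
 *	iounmap(regs);
 *
 * ioremap_cache() below behaves the same but maps the range with
 * normal write-back caching, so it is only suitable for memory that
 * is safe to cache, not for device registers.
 */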

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, IOR_MODE_CACHED);
}
EXPORT_SYMBOL(ioremap_cache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * The caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it. cpa takes care of the
	 * direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

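/*
 * Booting with "early_ioremap_debug" on the kernel command line makes
 * the early_ioremap()/early_iounmap() paths below log each mapping
 * together with a stack trace of the caller.
 */

/*
 * Bootstrap page-table page for the early fixmap: before the regular
 * paging machinery (and set_fixmap()) is available, early_ioremap()
 * installs its translations directly into bm_pte; once
 * after_paging_init is set, the helpers below switch over to the
 * regular fixmap calls.
 */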
static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pt(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

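/*
 * Nesting depth of currently outstanding early_ioremap() mappings:
 * incremented by early_ioremap(), decremented by early_iounmap().
 * A non-zero count at late_initcall time means a boot-time mapping
 * was leaked.
 */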
int __initdata early_ioremap_nested;

static int __init check_early_ioremap_leak(void)
{
	if (!early_ioremap_nested)
		return 0;

	printk(KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
	       early_ioremap_nested);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");
	WARN_ON(1);

	return 1;
}
late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
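
/*
 * Illustrative usage sketch (not part of the original file): before
 * the regular ioremap() is available, boot code can temporarily map
 * a firmware table like this; EXAMPLE_TABLE_PHYS is a hypothetical
 * placeholder:
 *
 *	void *p = early_ioremap(EXAMPLE_TABLE_PHYS, 128);
 *	if (p) {
 *		(read the table through p)
 *		early_iounmap(p, 128);
 *	}
 */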

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	WARN_ON(nesting < 0);

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */