/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#ifdef CONFIG_X86_64

static inline int phys_addr_valid(unsigned long addr)
{
	return addr < (1UL << boot_cpu_data.x86_phys_bits);
}

unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map) {
		x -= __START_KERNEL_map;
		VIRTUAL_BUG_ON(x >= KERNEL_IMAGE_SIZE);
		x += phys_base;
	} else {
		VIRTUAL_BUG_ON(x < PAGE_OFFSET);
		x -= PAGE_OFFSET;
		VIRTUAL_BUG_ON(system_state == SYSTEM_BOOTING ? x > MAXMEM :
					!phys_addr_valid(x));
	}
	return x;
}
EXPORT_SYMBOL(__phys_addr);

#else

static inline int phys_addr_valid(unsigned long addr)
{
	return 1;
}

unsigned long __phys_addr(unsigned long x)
{
	/* VMALLOC_* aren't constants; not available at boot time */
	VIRTUAL_BUG_ON(x < PAGE_OFFSET || (system_state != SYSTEM_BOOTING &&
					is_vmalloc_addr((void *)x)));
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

#endif
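
/*
 * Illustration (not part of the original file): __phys_addr() is the
 * backend of __pa().  On 64-bit it must distinguish kernel-image
 * addresses, which are relocated via phys_base, from direct-mapping
 * addresses, which are a plain PAGE_OFFSET shift.  A hypothetical
 * sketch of both cases:
 *
 *	unsigned long text_pa = __phys_addr((unsigned long)_stext);
 *	unsigned long dmap_pa = __phys_addr((unsigned long)__va(0x100000));
 *
 * The second call simply undoes __va(); the first also applies the
 * phys_base relocation of the kernel image.
 */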

int page_is_ram(unsigned long pagenr)
{
	resource_size_t addr, end;
	int i;

	/*
	 * A special case is the first 4KB of memory:
	 * this is a BIOS-owned area, not kernel RAM, but generally
	 * not listed as such in the E820 table.
	 */
	if (pagenr == 0)
		return 0;

	/*
	 * Second special case: some BIOSen report the PC BIOS
	 * area (640KB->1MB) as RAM even though it is not.
	 */
	if (pagenr >= (BIOS_BEGIN >> PAGE_SHIFT) &&
		    pagenr < (BIOS_END >> PAGE_SHIFT))
		return 0;

	for (i = 0; i < e820.nr_map; i++) {
		/*
		 * Not usable memory:
		 */
		if (e820.map[i].type != E820_RAM)
			continue;
		addr = (e820.map[i].addr + PAGE_SIZE-1) >> PAGE_SHIFT;
		end = (e820.map[i].addr + e820.map[i].size) >> PAGE_SHIFT;

		if ((pagenr >= addr) && (pagenr < end))
			return 1;
	}
	return 0;
}
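
/*
 * Usage sketch (illustrative, not from the original file): callers such
 * as __ioremap_caller() below walk a pfn range and refuse to remap
 * anything the E820 map marks as RAM, roughly:
 *
 *	for (pfn = start_pfn; pfn < end_pfn; pfn++)
 *		if (page_is_ram(pfn))
 *			return -EBUSY;	(hypothetical error path)
 */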

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			       unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
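
/*
 * Note (added for illustration): the direct mapping and an ioremap
 * mapping alias the same physical pages, so their cache attributes
 * must agree.  A hypothetical caller wanting write-combining would do:
 *
 *	if (ioremap_change_attr(vaddr, size, _PAGE_CACHE_WC) < 0)
 *		... tear the new mapping down ...
 *
 * which is exactly what __ioremap_caller() below does on failure.
 */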

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped.
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		pr_debug("Warning: reserve_memtype returned %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		/*
		 * Do not fall back to certain memory types with certain
		 * requested types:
		 * - request is uc-, return cannot be write-back
		 * - request is uc-, return cannot be write-combine
		 * - request is write-combine, return cannot be write-back
		 */
		if ((prot_val == _PAGE_CACHE_UC_MINUS &&
		     (new_prot_val == _PAGE_CACHE_WB ||
		      new_prot_val == _PAGE_CACHE_WC)) ||
		    (prot_val == _PAGE_CACHE_WC &&
		     new_prot_val == _PAGE_CACHE_WB)) {
			pr_debug(
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			free_memtype(phys_addr, phys_addr + size);
			return NULL;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;
	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot)) {
		free_memtype(phys_addr, phys_addr + size);
		free_vm_area(area);
		return NULL;
	}

	if (ioremap_change_attr(vaddr, size, prot_val) < 0) {
		free_memtype(phys_addr, phys_addr + size);
		vunmap(area->addr);
		return NULL;
	}

	return (void __iomem *) (vaddr + offset);
}
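
/*
 * Recap of the sequence above (comment added for illustration):
 * validate the range, reject normal RAM, reserve a memtype with
 * reserve_memtype(), grab a vm area, install the pages with
 * ioremap_page_range() and fix up the aliasing direct mapping with
 * ioremap_change_attr().  Every failure path past the reservation
 * must undo it with free_memtype().
 */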

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_wc_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Until we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
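
/*
 * Example (hypothetical driver, illustrative only; the MYDEV_* names
 * are placeholders, not real constants):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(MYDEV_BAR0_PHYS, MYDEV_BAR0_LEN);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(MYDEV_RESET, regs + MYDEV_CTRL);
 *	(void)readl(regs + MYDEV_STATUS);	(flush posted write)
 *	...
 *	iounmap(regs);
 */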

/**
 * ioremap_wc - map memory into CPU space, write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(unsigned long phys_addr, unsigned long size)
{
	if (pat_wc_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
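
/*
 * Example (hypothetical framebuffer driver, illustrative only):
 * write-combining suits a linear framebuffer, where streaming stores
 * dominate:
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 *
 * With PAT disabled this silently degrades to ioremap_nocache(), as
 * the fallback above shows.
 */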

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);
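
/*
 * Note (added for illustration): ioremap_cache() requests a write-back
 * mapping, which is only safe for memory-like resources such as
 * firmware tables, never for device registers with read side effects.
 */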

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel.
	 * Reuse of the virtual address is prevented by leaving it in
	 * the global lists until we're done with it. cpa takes care
	 * of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
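
/*
 * Note (added for illustration): the early return for the low ISA
 * range mirrors the special case in __ioremap_caller(), which hands
 * out plain phys_to_virt() pointers for that range without creating
 * a vm area; there is nothing to unmap for those.
 */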

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void *)ioremap(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}
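
/*
 * Usage sketch (illustrative, not from the original file): the
 * /dev/mem read path pairs these helpers around a copy, roughly:
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, sz))
 *		rc = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */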

#ifdef CONFIG_X86_32

int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)]
		__section(.bss.page_aligned);

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:   %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN: %d\n",
			FIX_BTMAP_BEGIN);
	}
}
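
/*
 * Note (added for illustration): bm_pte is a single page-table page,
 * so the whole FIX_BTMAP_BEGIN..FIX_BTMAP_END fixmap window must sit
 * inside one pmd entry's range; the check above warns at boot if the
 * fixmap layout ever violates that assumption.
 */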

void __init early_ioremap_clear(void)
{
	pmd_t *pmd;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_clear()\n");

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	pmd_clear(pmd);
	paravirt_release_pte(__pa(bm_pte) >> PAGE_SHIFT);
	__flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
	enum fixed_addresses idx;
	unsigned long addr, phys;
	pte_t *pte;

	after_paging_init = 1;
	for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
		addr = fix_to_virt(idx);
		pte = early_ioremap_pte(addr);
		if (pte_present(*pte)) {
			phys = pte_val(*pte) & PAGE_MASK;
			set_fixmap(idx, phys);
		}
	}
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      unsigned long phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);
	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(NULL, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   unsigned long phys)
{
	if (after_paging_init)
		set_fixmap(idx, phys);
	else
		__early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}
527
Ingo Molnar1b42f512008-01-30 13:33:45 +0100528
529int __initdata early_ioremap_nested;
530
Ingo Molnard690b2a2008-01-30 13:33:47 +0100531static int __init check_early_ioremap_leak(void)
532{
533 if (!early_ioremap_nested)
534 return 0;
535
536 printk(KERN_WARNING
Thomas Gleixner91eebf42008-01-30 13:34:05 +0100537 "Debug warning: early ioremap leak of %d areas detected.\n",
538 early_ioremap_nested);
Ingo Molnard690b2a2008-01-30 13:33:47 +0100539 printk(KERN_WARNING
Thomas Gleixner91eebf42008-01-30 13:34:05 +0100540 "please boot with early_ioremap_debug and report the dmesg.\n");
Ingo Molnard690b2a2008-01-30 13:33:47 +0100541 WARN_ON(1);
542
543 return 1;
544}
545late_initcall(check_early_ioremap_leak);

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages, nesting;
	enum fixed_addresses idx0, idx;

	WARN_ON(system_state != SYSTEM_BOOTING);

	nesting = early_ioremap_nested;
	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08lx, %08lx) [%d] => ",
		       phys_addr, size, nesting);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	if (nesting >= FIX_BTMAPS_NESTING) {
		WARN_ON(1);
		return NULL;
	}
	early_ioremap_nested++;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, fix_to_virt(idx0));

	return (void *) (offset + fix_to_virt(idx0));
}
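
/*
 * Example (hypothetical, illustrative only; table_phys and table_len
 * are placeholder names): boot code that wants to peek at a firmware
 * table before the normal ioremap machinery is up can map it briefly:
 *
 *	void *p = early_ioremap(table_phys, table_len);
 *	if (p) {
 *		... copy out what is needed ...
 *		early_iounmap(p, table_len);
 *	}
 *
 * Mappings must be torn down in LIFO order, since the nesting counter
 * indexes slots in the FIX_BTMAP area.
 */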

void __init early_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int nesting;

	nesting = --early_ioremap_nested;
	if (WARN_ON(nesting < 0))
		return;

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, nesting);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}

void __this_fixmap_does_not_exist(void)
{
	WARN_ON(1);
}

#endif /* CONFIG_X86_32 */