/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
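
/*
 * Illustrative sketch only (not code used elsewhere in this file): a PCI
 * driver would typically map a BAR with ioremap_nocache(), poke registers
 * through the MMIO helpers, and drop the mapping with iounmap().  The
 * "pdev", BAR index 0 and register offset 0x10 below are made-up examples.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	...
 *	iounmap(regs);
 */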

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

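/**
 * ioremap_cache - map bus memory into CPU space, write-back cacheable
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * Like ioremap_nocache() above, but the mapping is created with normal
 * write-back caching (_PAGE_CACHE_WB), so it is intended for memory-like
 * resources.  Must be freed with iounmap.
 */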
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

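/**
 * ioremap_prot - map bus memory into CPU space with caller-chosen caching
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 * @prot_val: protection bits; only the _PAGE_CACHE_MASK part is honoured
 *
 * Must be freed with iounmap.
 */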
void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

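/*
 * Undo xlate_dev_mem_ptr(): RAM pages were handed out via __va() and need
 * no teardown, otherwise the temporary ioremap_cache() mapping is dropped.
 */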
void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

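/*
 * Set up the boot-time fixmap slots: record each slot's virtual address in
 * slot_virt[] and hook the statically allocated bm_pte page table into the
 * pmd that covers the FIX_BTMAP range.
 */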
void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

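/*
 * Re-run early_ioremap_init(), typically after the fixmap area has been
 * moved.  No early mapping is expected to be live at this point, hence
 * the WARN_ON() if a slot is still in use.
 */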
void __init fixup_early_ioremap(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i]) {
			WARN_ON(1);
			break;
		}
	}

	early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	       "Debug warning: early ioremap leak of %d areas detected.\n",
		count);
	printk(KERN_WARNING
		"please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

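/*
 * Common worker for early_ioremap()/early_memremap(): claim a free
 * FIX_BTMAP slot and map the page-aligned range into it with the
 * requested protection, one fixmap entry per page.
 */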
static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) not found slot\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
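
/*
 * Illustrative boot-time pairing (sketch only; the physical address and
 * length are made-up values, not something this file defines):
 *
 *	void __iomem *p = early_ioremap(0x000f0000, 0x1000);
 *
 *	if (p) {
 *		... read the firmware data ...
 *		early_iounmap(p, 0x1000);
 *	}
 */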

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}