/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}
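
/*
 * For illustration only (not a call that appears in this file): making
 * two pages of the direct mapping uncachable, to match a UC ioremap of
 * the same physical range, would look roughly like
 *
 *	err = ioremap_change_attr(vaddr, 2 * PAGE_SIZE, _PAGE_CACHE_UC);
 *
 * In practice this fixup is driven from the PAT code (e.g. via
 * kernel_map_sync_memtype()) rather than called directly by drivers.
 */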

static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	pfn      = phys_addr >> PAGE_SHIFT;
	last_pfn = last_addr >> PAGE_SHIFT;
	if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
				  __ioremap_check_ram) == 1)
		return NULL;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;
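	/*
	 * Worked example (illustrative values): for phys_addr 0x10001234
	 * and size 0x100, last_addr is 0x10001333, so offset becomes
	 * 0x234, phys_addr is rounded down to 0x10001000 and size rounded
	 * up to 0x1000 (one 4K page). The caller ultimately gets back
	 * vaddr + 0x234.
	 */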

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
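
/*
 * Typical driver usage (an illustrative sketch, not code from this file;
 * the BAR index and the 0x10 register offset are hypothetical):
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(0x1, regs + 0x10);
 *	...
 *	iounmap(regs);
 */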

/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
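
/*
 * Write combining is what a framebuffer driver would typically want for
 * pixel memory, e.g. (sketch using the standard fbdev fields):
 *
 *	info->screen_base = ioremap_wc(info->fix.smem_start,
 *				       info->fix.smem_len);
 */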

void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);
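
/*
 * ioremap_prot() lets the caller pass the caching bits explicitly; the
 * mm core uses it when poking at a task's MMIO mappings, roughly
 * (a sketch modelled on generic_access_phys(), where prot holds the
 * page protection bits of the existing user mapping):
 *
 *	maddr = ioremap_prot(phys_addr, PAGE_SIZE, prot);
 */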

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}

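/*
 * These two are the hooks behind /dev/mem; the character device code
 * brackets each access with them, roughly (a sketch based on the read
 * path in drivers/char/mem.c):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (copy_to_user(buf, ptr, sz))
 *		...fail...
 *	unxlate_dev_mem_ptr(p, ptr);
 */
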
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
		       fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
		       fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
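
/*
 * __early_set_fixmap() is what the generic early_ioremap() code in
 * mm/early_ioremap.c ends up calling for each fixmap slot. Boot code
 * that needs to read firmware tables before the real ioremap machinery
 * is up uses the pattern (sketch):
 *
 *	p = early_ioremap(phys, len);
 *	...parse the table...
 *	early_iounmap(p, len);
 */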