/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			enum page_cache_mode pcm)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_MODE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

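/*
 * Illustrative note (the call site lives elsewhere): kernel_map_sync_memtype()
 * in pat.c uses ioremap_change_attr() to re-attribute the linear-map alias
 * of a freshly ioremapped range, roughly:
 *
 *	ioremap_change_attr((unsigned long)__va(phys_addr), size, pcm);
 *
 * i.e. the arguments are a kernel virtual address, a byte size (converted
 * to a page count above), and the desired page_cache_mode.
 */
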
static int __ioremap_check_ram(unsigned long start_pfn, unsigned long nr_pages,
			       void *arg)
{
	unsigned long i;

	for (i = 0; i < nr_pages; ++i)
		if (pfn_valid(start_pfn + i) &&
		    !PageReserved(pfn_to_page(start_pfn + i)))
			return 1;

	WARN_ONCE(1, "ioremap on RAM pfn 0x%lx\n", start_pfn);

	return 0;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. It transparently creates kernel huge I/O mappings when
 * the physical address is aligned to a huge page size (1GB or 2MB) and
 * the requested size is at least the huge page size.
 *
 * NOTE: MTRRs can override PAT memory types with 4KB granularity.
 * Therefore, the mapping code falls back to smaller pages, down to 4KB,
 * when a mapping range is covered by non-WB-type MTRRs.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, enum page_cache_mode pcm, void *caller)
{
	unsigned long offset, vaddr;
	resource_size_t pfn, last_pfn, last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	enum page_cache_mode new_pcm;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;
	int ram_region;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	/* First check if the whole region can be identified as RAM or not */
	ram_region = region_is_ram(phys_addr, size);
	if (ram_region > 0) {
		WARN_ONCE(1, "ioremap on RAM at 0x%lx - 0x%lx\n",
			  (unsigned long int)phys_addr,
			  (unsigned long int)last_addr);
		return NULL;
	}

	/* If the region could not be identified (-1), check page by page */
	if (ram_region < 0) {
		pfn = phys_addr >> PAGE_SHIFT;
		last_pfn = last_addr >> PAGE_SHIFT;
		if (walk_system_ram_range(pfn, last_pfn - pfn + 1, NULL,
					  __ioremap_check_ram) == 1)
			return NULL;
	}
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PHYSICAL_PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
				 pcm, &new_pcm);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (pcm != new_pcm) {
		if (!is_new_memtype_allowed(phys_addr, size, pcm, new_pcm)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%x, got 0x%x\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				pcm, new_pcm);
			goto err_free_memtype;
		}
		pcm = new_pcm;
	}

	prot = PAGE_KERNEL_IO;
	switch (pcm) {
	case _PAGE_CACHE_MODE_UC:
	default:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC));
		break;
	case _PAGE_CACHE_MODE_UC_MINUS:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_UC_MINUS));
		break;
	case _PAGE_CACHE_MODE_WC:
		prot = __pgprot(pgprot_val(prot) |
				cachemode2protval(_PAGE_CACHE_MODE_WC));
		break;
	case _PAGE_CACHE_MODE_WB:
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, pcm))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(unaligned_phys_addr, unaligned_size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

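/*
 * Worked example of the alignment handling above (illustrative values):
 * for __ioremap_caller(0xfebc1004, 0x10, ...), offset becomes 0x004,
 * phys_addr is rounded down to 0xfebc1000, and size is page-aligned up
 * to 0x1000. The caller gets back area->addr + 0x004, so a register at
 * bus address 0xfebc1004 is reachable even though the page tables only
 * ever map whole pages.
 */
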
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled() ? _PAGE_CACHE_MODE_UC : _PAGE_CACHE_MODE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS. Drivers that are certain they need or can already
	 * be converted over to strong UC can use ioremap_uc().
	 */
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);

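/*
 * Illustrative usage sketch (hypothetical driver, not part of this file):
 * mapping a device's MMIO BAR and reading a register. "MYDEV_STATUS" is a
 * made-up offset; real drivers take the addresses from the PCI core.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + MYDEV_STATUS);
 *	...
 *	iounmap(regs);
 */
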
/**
 * ioremap_uc - map bus memory into CPU space as strongly uncachable
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_uc performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked with a strong
 * preference as completely uncachable on the CPU when possible. For non-PAT
 * systems this ends up setting page-attribute flags PCD=1, PWT=1. For PAT
 * systems this will set the PAT entry for the pages as strong UC. This call
 * will honor existing caching rules from things like the PCI bus. Note that
 * there are other caches and buffers on many busses. In particular driver
 * authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_uc(resource_size_t phys_addr, unsigned long size)
{
	enum page_cache_mode pcm = _PAGE_CACHE_MODE_UC;

	return __ioremap_caller(phys_addr, size, pcm,
				__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(ioremap_uc);

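/*
 * Design note (summarizing the kernel-doc above): ioremap_nocache() asks
 * for UC- (UC_MINUS), which MTRRs may still effectively turn into WC,
 * while ioremap_uc() requests strong UC, which stays uncached regardless
 * of the MTRR settings. A sketch for a device that cannot tolerate write
 * combining at all (hypothetical "pdev", BAR 0):
 *
 *	regs = ioremap_uc(pci_resource_start(pdev, 0),
 *			  pci_resource_len(pdev, 0));
 */
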
/**
 * ioremap_wc - map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled())
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);

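/*
 * Illustrative sketch (hypothetical framebuffer driver): write combining
 * suits large, write-mostly apertures such as video memory, where batched
 * burst writes are much faster than uncached ones. "fb_phys" and "fb_len"
 * are made-up names:
 *
 *	void __iomem *fb;
 *
 *	fb = ioremap_wc(fb_phys, fb_len);
 *	if (!fb)
 *		return -ENOMEM;
 *	memset_io(fb, 0, fb_len);
 *	...
 *	iounmap(fb);
 */
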
void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size)
{
	return __ioremap_caller(phys_addr, size, _PAGE_CACHE_MODE_WB,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_cache);

void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size,
				unsigned long prot_val)
{
	return __ioremap_caller(phys_addr, size,
				pgprot2cachemode(__pgprot(prot_val)),
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_prot);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	p = find_vm_area((void __force *)addr);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

int __init arch_ioremap_pud_supported(void)
{
#ifdef CONFIG_X86_64
	return cpu_has_gbpages;
#else
	return 0;
#endif
}

int __init arch_ioremap_pmd_supported(void)
{
	return cpu_has_pse;
}

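/*
 * Illustrative note on the two helpers above: they gate the transparent
 * huge I/O mappings described before __ioremap_caller(). On x86-64 with
 * 1GB-page support (cpu_has_gbpages), a 1GB-aligned request of at least
 * 1GB can be mapped with a single pud; with PSE (cpu_has_pse), a
 * 2MB-aligned request of at least 2MB can use 2MB pmds. For example, a
 * 16MB BAR whose bus address is 2MB-aligned would, under these
 * assumptions, be covered by eight 2MB entries instead of 4096 4KB ptes
 * (subject to the MTRR fallback noted earlier).
 */
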
/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(phys_addr_t phys)
{
	unsigned long start  = phys &  PAGE_MASK;
	unsigned long offset = phys & ~PAGE_MASK;
	unsigned long vaddr;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	vaddr = (unsigned long)ioremap_cache(start, PAGE_SIZE);
	/* Only add the offset on success and return NULL if the ioremap() failed: */
	if (vaddr)
		vaddr += offset;

	return (void *)vaddr;
}

void unxlate_dev_mem_ptr(phys_addr_t phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
}

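/*
 * Illustrative sketch of how the two helpers above pair up in a /dev/mem
 * style read path (simplified from drivers/char/mem.c):
 *
 *	void *ptr = xlate_dev_mem_ptr(phys);
 *
 *	if (!ptr)
 *		return -EFAULT;
 *	if (copy_to_user(buf, ptr, count))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(phys, ptr);
 *
 * For RAM pages xlate_dev_mem_ptr() is just __va() and unxlate is a no-op;
 * for everything else each call temporarily maps one page with
 * ioremap_cache() and unxlate tears it down again.
 */
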
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

bool __init is_early_ioremap_ptep(pte_t *ptep)
{
	return ptep >= &bm_pte[0] && ptep < &bm_pte[PAGE_SIZE/sizeof(pte_t)];
}

void __init early_ioremap_init(void)
{
	pmd_t *pmd;

#ifdef CONFIG_X86_64
	BUILD_BUG_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#else
	WARN_ON((fix_to_virt(0) + PAGE_SIZE) & ((1 << PMD_SHIFT) - 1));
#endif

	early_ioremap_setup();

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
		       FIX_BTMAP_BEGIN);
	}
}

void __init __early_set_fixmap(enum fixed_addresses idx,
			       phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}
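
/*
 * Illustrative sketch of the fixmap mechanism above (not a call sequence
 * taken from this file): map a page of firmware tables at a fixed slot
 * during early boot, then tear it down by passing an empty pgprot, which
 * takes the pte_clear() path. "table_phys" is a made-up name:
 *
 *	__early_set_fixmap(FIX_BTMAP_BEGIN, table_phys, PAGE_KERNEL_IO);
 *	ptr = (void *)fix_to_virt(FIX_BTMAP_BEGIN) +
 *	      (table_phys & ~PAGE_MASK);
 *	...
 *	__early_set_fixmap(FIX_BTMAP_BEGIN, 0, __pgprot(0));
 *
 * The generic early_ioremap()/early_iounmap() helpers manage the
 * FIX_BTMAP_* slots this way so callers don't pick slots by hand.
 */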