/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
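
/*
 * Worked example (hypothetical numbers): for phys_addr = 0xfebc1004 and
 * size = 0x10, offset becomes 0x004, the page at 0xfebc1000 gets mapped,
 * and the returned pointer is the vm area address plus 0x004, so the
 * caller sees the exact byte it asked for.
 */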
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
        void __iomem * addr;
        struct vm_struct * area;
        unsigned long offset, last_addr;
        pgprot_t prot;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr)
                return NULL;

        /*
         * Don't remap the low PCI/ISA area, it's always mapped..
         */
        if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
                return (void __iomem *) phys_to_virt(phys_addr);

        /*
         * Don't allow anybody to remap normal RAM that we're using..
         */
        if (phys_addr <= virt_to_phys(high_memory - 1)) {
                char *t_addr, *t_end;
                struct page *page;

                t_addr = __va(phys_addr);
                t_end = t_addr + (size - 1);

                for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
                        if (!PageReserved(page))
                                return NULL;
        }

        prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
                        | _PAGE_ACCESSED | flags);

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Ok, go for it..
         */
        area = get_vm_area(size, VM_IOREMAP | (flags << 20));
        if (!area)
                return NULL;
        area->phys_addr = phys_addr;
        addr = (void __iomem *) area->addr;
        if (ioremap_page_range((unsigned long) addr,
                               (unsigned long) addr + size, phys_addr, prot)) {
                vunmap((void __force *) addr);
                return NULL;
        }
        return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It is useful when control registers live in such an area and write
 * combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
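
/*
 * Usage sketch (hedged; "pdev" and CTRL_REG are illustrative names, not
 * defined in this file): a PCI driver would typically do
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (regs) {
 *		writel(1, regs + CTRL_REG);
 *		iounmap(regs);
 *	}
 */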

void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
        unsigned long last_addr;
        void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD | _PAGE_PWT);
        if (!p)
                return p;

        /* Guaranteed to be > phys_addr, as per __ioremap() */
        last_addr = phys_addr + size - 1;

        if (last_addr < virt_to_phys(high_memory) - 1) {
                struct page *ppage = virt_to_page(__va(phys_addr));
                unsigned long npages;

                phys_addr &= PAGE_MASK;

                /* This might overflow and become zero.. */
                last_addr = PAGE_ALIGN(last_addr);

                /* .. but that's ok, because modulo-2**n arithmetic will make
                 * the page-aligned "last - first" come out right.
                 */
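                /*
                 * Worked example (hypothetical numbers): phys_addr =
                 * 0xfff00000, last_addr = 0xffffffff. PAGE_ALIGN(0xffffffff)
                 * wraps to 0, yet (0 - 0xfff00000) mod 2^32 = 0x00100000,
                 * i.e. 256 pages, exactly the span being remapped.
                 */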
                npages = (last_addr - phys_addr) >> PAGE_SHIFT;

                if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
                        iounmap(p);
                        p = NULL;
                }
                global_flush_tlb();
        }

        return p;
}
EXPORT_SYMBOL(ioremap_nocache);

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
        struct vm_struct *p, *o;

        if ((void __force *)addr <= high_memory)
                return;

        /*
         * __ioremap special-cases the PCI/ISA range by not instantiating a
         * vm_area and by simply returning an address into the kernel mapping
         * of ISA space. So handle that here.
         */
        if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
            addr < phys_to_virt(ISA_END_ADDRESS))
                return;

        addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

        /*
         * Use the vm area unlocked, assuming the caller ensures there isn't
         * another iounmap for the same address in parallel. Reuse of the
         * virtual address is prevented by leaving it in the global lists
         * until we're done with it. cpa takes care of the direct mappings.
         */
        read_lock(&vmlist_lock);
        for (p = vmlist; p; p = p->next) {
                if (p->addr == addr)
                        break;
        }
        read_unlock(&vmlist_lock);

        if (!p) {
                printk("iounmap: bad address %p\n", addr);
                dump_stack();
                return;
        }

        /* Reset the direct mapping. Can block */
        if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
                change_page_attr(virt_to_page(__va(p->phys_addr)),
                                 get_vm_area_size(p) >> PAGE_SHIFT,
                                 PAGE_KERNEL);
                global_flush_tlb();
        }

        /* Finally remove it */
        o = remove_vm_area((void *)addr);
        BUG_ON(p != o || o == NULL);
        kfree(p);
}
EXPORT_SYMBOL(iounmap);

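/*
 * Boot-time (__initdata) state for early_ioremap(): bm_pte is a single,
 * page-aligned page table (1024 entries, i.e. one 4 MiB pgd slot) that
 * backs the FIX_BTMAP fixmap range before the real set_fixmap() machinery
 * is available.
 */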
static __initdata int after_paging_init;
static __initdata unsigned long bm_pte[1024]
        __attribute__((aligned(PAGE_SIZE)));

static inline unsigned long * __init early_ioremap_pgd(unsigned long addr)
{
        return (unsigned long *)swapper_pg_dir + ((addr >> 22) & 1023);
}

static inline unsigned long * __init early_ioremap_pte(unsigned long addr)
{
        return bm_pte + ((addr >> PAGE_SHIFT) & 1023);
}
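
/*
 * Index arithmetic sketch (two-level i386 paging, 4 KiB pages): bits 31..22
 * of an address select one of 1024 pgd slots, bits 21..12 one of 1024 ptes.
 * E.g. for a fixmap address such as 0xffc00000 the pgd index is
 * 0xffc00000 >> 22 = 1023 and the pte index is (0xffc00000 >> 12) & 1023 = 0.
 */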

void __init early_ioremap_init(void)
{
        unsigned long *pgd;

        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = __pa(bm_pte) | _PAGE_TABLE;
        memset(bm_pte, 0, sizeof(bm_pte));
        BUG_ON(pgd != early_ioremap_pgd(fix_to_virt(FIX_BTMAP_END)));
}

void __init early_ioremap_clear(void)
{
        unsigned long *pgd;

        pgd = early_ioremap_pgd(fix_to_virt(FIX_BTMAP_BEGIN));
        *pgd = 0;
        __flush_tlb_all();
}

void __init early_ioremap_reset(void)
{
        enum fixed_addresses idx;
        unsigned long *pte, phys, addr;

        after_paging_init = 1;
        for (idx = FIX_BTMAP_BEGIN; idx >= FIX_BTMAP_END; idx--) {
                addr = fix_to_virt(idx);
                pte = early_ioremap_pte(addr);
                if (*pte & _PAGE_PRESENT) {
254 phys = *pte & PAGE_MASK;
255 set_fixmap(idx, phys);
256 }
257 }
258}

static void __init __early_set_fixmap(enum fixed_addresses idx,
                                      unsigned long phys, pgprot_t flags)
{
        unsigned long *pte, addr = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        pte = early_ioremap_pte(addr);
        if (pgprot_val(flags))
                *pte = (phys & PAGE_MASK) | pgprot_val(flags);
        else
                *pte = 0;
        __flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
                                           unsigned long phys)
{
        if (after_paging_init)
                set_fixmap(idx, phys);
        else
                __early_set_fixmap(idx, phys, PAGE_KERNEL);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
        if (after_paging_init)
                clear_fixmap(idx);
        else
                __early_set_fixmap(idx, 0, __pgprot(0));
}

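/*
 * Depth of currently active early_ioremap() windows: at most
 * FIX_BTMAPS_NESTING may be live at once, each claiming its own
 * NR_FIX_BTMAPS-slot range of the fixmap.
 */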
int __initdata early_ioremap_nested;

void __init *early_ioremap(unsigned long phys_addr, unsigned long size)
{
        unsigned long offset, last_addr;
        unsigned int nrpages, nesting;
        enum fixed_addresses idx0, idx;

        WARN_ON(system_state != SYSTEM_BOOTING);

        nesting = early_ioremap_nested;

        /* Don't allow wraparound or zero size */
        last_addr = phys_addr + size - 1;
        if (!size || last_addr < phys_addr) {
                WARN_ON(1);
                return NULL;
        }

        if (nesting >= FIX_BTMAPS_NESTING) {
                WARN_ON(1);
                return NULL;
        }
        early_ioremap_nested++;

        /*
         * Mappings have to be page-aligned
         */
        offset = phys_addr & ~PAGE_MASK;
        phys_addr &= PAGE_MASK;
        size = PAGE_ALIGN(last_addr + 1) - phys_addr;

        /*
         * Mappings have to fit in the FIX_BTMAP area.
         */
        nrpages = size >> PAGE_SHIFT;
        if (nrpages > NR_FIX_BTMAPS) {
                WARN_ON(1);
                return NULL;
        }

        /*
         * Ok, go for it..
         */
        idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        idx = idx0;
        while (nrpages > 0) {
                early_set_fixmap(idx, phys_addr);
                phys_addr += PAGE_SIZE;
                --idx;
                --nrpages;
        }

        return (void *) (offset + fix_to_virt(idx0));
}
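
/*
 * Boot-time usage sketch (hedged; "phys" and "len" stand for a
 * caller-supplied table address and length, e.g. a firmware table):
 *
 *	void *p = early_ioremap(phys, len);
 *	if (p) {
 *		... read the table through p ...
 *		early_iounmap(p, len);
 *	}
 */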

void __init early_iounmap(void *addr, unsigned long size)
{
        unsigned long virt_addr;
        unsigned long offset;
        unsigned int nrpages;
        enum fixed_addresses idx;
        int nesting;

        nesting = --early_ioremap_nested;
        WARN_ON(nesting < 0);

        virt_addr = (unsigned long)addr;
        if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
                WARN_ON(1);
                return;
        }
        offset = virt_addr & ~PAGE_MASK;
        nrpages = PAGE_ALIGN(offset + size) >> PAGE_SHIFT;

        idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*nesting;
        while (nrpages > 0) {
                early_clear_fixmap(idx);
                --idx;
                --nrpages;
        }
}

void __this_fixmap_does_not_exist(void)
{
        WARN_ON(1);
}