/*
 * arch/i386/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/pgtable.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

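/*
 * Fill in the PTEs for the kernel virtual range [addr, end), mapping it
 * to the physical range that starts at phys_addr.  Every page is marked
 * present, writable, dirty and accessed, plus the caller-supplied flags.
 */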
static int ioremap_pte_range(pmd_t *pmd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pte_t *pte;
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	pte = pte_alloc_kernel(pmd, addr);
	if (!pte)
		return -ENOMEM;
	do {
		BUG_ON(!pte_none(*pte));
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	return 0;
}

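/*
 * Allocate and walk the pmd entries covering [addr, end), filling each
 * chunk via ioremap_pte_range().  The physical address is first biased
 * by -addr, so phys_addr + addr is the physical address mapped at addr.
 */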
static inline int ioremap_pmd_range(pud_t *pud, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pmd_t *pmd;
	unsigned long next;

	phys_addr -= addr;
	pmd = pmd_alloc(&init_mm, pud, addr);
	if (!pmd)
		return -ENOMEM;
	do {
		next = pmd_addr_end(addr, end);
		if (ioremap_pte_range(pmd, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pmd++, addr = next, addr != end);
	return 0;
}

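/*
 * Same pattern one level up: allocate and walk the pud entries covering
 * [addr, end) and hand each chunk down to ioremap_pmd_range().
 */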
static inline int ioremap_pud_range(pgd_t *pgd, unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pud_t *pud;
	unsigned long next;

	phys_addr -= addr;
	pud = pud_alloc(&init_mm, pgd, addr);
	if (!pud)
		return -ENOMEM;
	do {
		next = pud_addr_end(addr, end);
		if (ioremap_pmd_range(pud, addr, next, phys_addr + addr, flags))
			return -ENOMEM;
	} while (pud++, addr = next, addr != end);
	return 0;
}

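/*
 * Top level of the mapping walk used by __ioremap(): map the kernel
 * virtual range [addr, end) to phys_addr with the given flags, flushing
 * the caches first and the TLB once the page tables are in place.
 */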
static int ioremap_page_range(unsigned long addr,
		unsigned long end, unsigned long phys_addr, unsigned long flags)
{
	pgd_t *pgd;
	unsigned long next;
	int err;

	BUG_ON(addr >= end);
	flush_cache_all();
	phys_addr -= addr;
	pgd = pgd_offset_k(addr);
	do {
		next = pgd_addr_end(addr, end);
		err = ioremap_pud_range(pgd, addr, next, phys_addr+addr, flags);
		if (err)
			break;
	} while (pgd++, addr = next, addr != end);
	flush_tlb_all();
	return err;
}

/*
 * Generic mapping function (not visible outside):
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
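/*
 * For example (illustrative numbers only): __ioremap(0xfebc1004, 0x10, 0)
 * page-aligns the request, maps the single page at physical 0xfebc1000,
 * and returns a pointer 4 bytes into that new mapping.
 */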
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void __iomem * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (void __iomem *) phys_to_virt(phys_addr);

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (phys_addr <= virt_to_phys(high_memory - 1)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = (void __iomem *) area->addr;
	if (ioremap_page_range((unsigned long) addr,
			(unsigned long) addr + size, phys_addr, flags)) {
		vunmap((void __force *) addr);
		return NULL;
	}
	return (void __iomem *) (offset + (char __iomem *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @offset: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */

void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	unsigned long last_addr;
	void __iomem *p = __ioremap(phys_addr, size, _PAGE_PCD);
	if (!p)
		return p;

	/* Guaranteed to be > phys_addr, as per __ioremap() */
	last_addr = phys_addr + size - 1;

	if (last_addr < virt_to_phys(high_memory) - 1) {
		struct page *ppage = virt_to_page(__va(phys_addr));
		unsigned long npages;

		phys_addr &= PAGE_MASK;

		/* This might overflow and become zero.. */
		last_addr = PAGE_ALIGN(last_addr);

		/* .. but that's ok, because modulo-2**n arithmetic will make
		 * the page-aligned "last - first" come out right.
		 */
		npages = (last_addr - phys_addr) >> PAGE_SHIFT;

		if (change_page_attr(ppage, npages, PAGE_KERNEL_NOCACHE) < 0) {
			iounmap(p);
			p = NULL;
		}
		global_flush_tlb();
	}

	return p;
}
EXPORT_SYMBOL(ioremap_nocache);
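
/*
 * Typical driver usage (an illustrative sketch only; pdev, CTRL_REG and
 * STATUS_REG are hypothetical names that are not defined in this file):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + CTRL_REG);
 *	status = readl(regs + STATUS_REG);
 *	...
 *	iounmap(regs);
 */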

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
			addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void *)(PAGE_MASK & (unsigned long __force)addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if ((p->flags >> 20) && p->phys_addr < virt_to_phys(high_memory) - 1) {
		change_page_attr(virt_to_page(__va(p->phys_addr)),
				 p->size >> PAGE_SHIFT,
				 PAGE_KERNEL);
		global_flush_tlb();
	}

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);

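/*
 * bt_ioremap() is the boot-time counterpart of __ioremap(): it maps a
 * physical range into the fixed FIX_BTMAP_* virtual slots so that early
 * boot code can reach device memory before the vmalloc-based ioremap()
 * machinery is usable.
 */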
void __init *bt_ioremap(unsigned long phys_addr, unsigned long size)
{
	unsigned long offset, last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return phys_to_virt(phys_addr);

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS)
		return NULL;

	/*
	 * Ok, go for it..
	 */
	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		set_fixmap(idx, phys_addr);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	return (void*) (offset + fix_to_virt(FIX_BTMAP_BEGIN));
}

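/*
 * Undo a bt_ioremap(): clear the FIX_BTMAP_* fixmap slots backing the
 * given virtual range.
 */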
void __init bt_iounmap(void *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN))
		return;
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN;
	while (nrpages > 0) {
		clear_fixmap(idx);
		--idx;
		--nrpages;
	}
}