/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

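/*
 * Fill in the PTEs for one PMD-sized chunk of the mapping: every page in
 * [address, end) is pointed at the corresponding physical frame with the
 * requested protection bits. The pages must not be mapped already.
 */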
static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
				_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

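/*
 * Allocate (if necessary) and fill the PTE tables hanging off one PUD-sized
 * chunk of the mapping, one PMD entry at a time.
 */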
static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

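/*
 * Same one level up: populate the PMD tables covering one PGD-sized chunk
 * of the mapping.
 */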
static inline int remap_area_pud(pud_t * pud, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return 0;
}

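/*
 * Walk the kernel page tables from the PGD down, allocating any missing
 * intermediate levels, and map [address, address + size) to phys_addr.
 * The TLB is flushed once at the end so the new mappings become visible.
 */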
static int remap_area_pages(unsigned long address, unsigned long phys_addr,
				unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
					phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
					unsigned long flags)
{
	int err = 0;
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page because the
		 * phys addr can be in a hole between nodes and not have a
		 * memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}

/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
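	/*
	 * The caching flags are stashed in the upper bits of the vm_struct
	 * flags so that iounmap() can later tell whether the direct-mapping
	 * attributes need to be restored.
	 */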
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}
EXPORT_SYMBOL(ioremap_nocache);

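/*
 * A minimal usage sketch (hypothetical driver, for illustration only):
 * map a PCI device's first BAR uncached, write one register, then tear
 * the mapping down with iounmap(). "pdev" and MY_CTRL_REG are made-up
 * names, not part of this file.
 *
 *	void __iomem *regs;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + MY_CTRL_REG);
 *	iounmap(regs);
 */
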
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if (addr <= high_memory)
		return;
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
	/*
	 * Use the vm area unlocked, assuming the caller ensures there isn't
	 * another iounmap for the same address in parallel. Reuse of the
	 * virtual address is prevented by leaving it in the global lists
	 * until we're done with it. cpa takes care of the direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if (p->flags >> 20)
		ioremap_change_attr(p->phys_addr, p->size, 0);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);