/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <asm/io.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/proto.h>

#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

static inline void remap_area_pte(pte_t * pte, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;
	unsigned long pfn;

	address &= ~PMD_MASK;
	end = address + size;
	if (end > PMD_SIZE)
		end = PMD_SIZE;
	if (address >= end)
		BUG();
	pfn = phys_addr >> PAGE_SHIFT;
	do {
		if (!pte_none(*pte)) {
			printk("remap_area_pte: page already exists\n");
			BUG();
		}
		set_pte(pte, pfn_pte(pfn, __pgprot(_PAGE_PRESENT | _PAGE_RW |
					_PAGE_GLOBAL | _PAGE_DIRTY | _PAGE_ACCESSED | flags)));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));
}

static inline int remap_area_pmd(pmd_t * pmd, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PUD_MASK;
	end = address + size;
	if (end > PUD_SIZE)
		end = PUD_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pte_t * pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;
		remap_area_pte(pte, address, end - address, address + phys_addr, flags);
		address = (address + PMD_SIZE) & PMD_MASK;
		pmd++;
	} while (address && (address < end));
	return 0;
}

static inline int remap_area_pud(pud_t * pud, unsigned long address, unsigned long size,
	unsigned long phys_addr, unsigned long flags)
{
	unsigned long end;

	address &= ~PGDIR_MASK;
	end = address + size;
	if (end > PGDIR_SIZE)
		end = PGDIR_SIZE;
	phys_addr -= address;
	if (address >= end)
		BUG();
	do {
		pmd_t * pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;
		remap_area_pmd(pmd, address, end - address, address + phys_addr, flags);
		address = (address + PUD_SIZE) & PUD_MASK;
		pud++;
	} while (address && (address < end));
	return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    unsigned long size, unsigned long flags)
{
	int error;
	pgd_t *pgd;
	unsigned long end = address + size;

	phys_addr -= address;
	pgd = pgd_offset_k(address);
	flush_cache_all();
	if (address >= end)
		BUG();
	do {
		pud_t *pud;
		pud = pud_alloc(&init_mm, pgd, address);
		error = -ENOMEM;
		if (!pud)
			break;
		if (remap_area_pud(pud, address, end - address,
				   phys_addr + address, flags))
			break;
		error = 0;
		address = (address + PGDIR_SIZE) & PGDIR_MASK;
		pgd++;
	} while (address && (address < end));
	flush_tlb_all();
	return error;
}

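/*
 * Illustration only (not part of the mapping code): the remap_area_*()
 * helpers above walk the four x86_64 page-table levels by hand.  With 4KB
 * pages each level consumes 9 bits of the virtual address (PGDIR_SHIFT=39,
 * PUD_SHIFT=30, PMD_SHIFT=21, PAGE_SHIFT=12), e.g. for an arbitrary kernel
 * virtual address:
 *
 *	unsigned long va    = 0xffffc20000001234UL;
 *	unsigned long pgd_i = (va >> 39) & 0x1ff;	// PGD index
 *	unsigned long pud_i = (va >> 30) & 0x1ff;	// PUD index
 *	unsigned long pmd_i = (va >> 21) & 0x1ff;	// PMD index
 *	unsigned long pte_i = (va >> 12) & 0x1ff;	// PTE index
 *	unsigned long off   = va & 0xfff;		// byte offset in the 4KB page
 *
 * remap_area_pages() steps through PGD entries, remap_area_pud() and
 * remap_area_pmd() through the middle levels, and remap_area_pte() fills
 * the leaf entries with _PAGE_PRESENT | _PAGE_RW | ... | flags.
 */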
/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
		    unsigned long flags)
{
	int err = 0;
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here and not a struct page because the
		 * phys addr can be in a hole between nodes and not have a
		 * memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages, __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}

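/*
 * Worked example (illustrative, not part of the code): a mapping of
 * 0x2500 bytes covers
 *
 *	npages = (0x2500 + PAGE_SIZE - 1) >> PAGE_SHIFT = 3
 *
 * pages.  If those pages also lie below end_pfn_map << PAGE_SHIFT they are
 * already reachable through the cached direct mapping at __va(phys_addr),
 * so the same physical memory would otherwise be visible under two
 * different cache attributes; change_page_attr_addr() above rewrites the
 * direct-mapping PTEs so that both aliases agree.
 */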
/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem * __ioremap(unsigned long phys_addr, unsigned long size, unsigned long flags)
{
	void * addr;
	struct vm_struct * area;
	unsigned long offset, last_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for(page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if(!PageReserved(page))
				return NULL;
	}
#endif

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long) addr, phys_addr, size, flags)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
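/*
 * Worked example of the alignment handling above (illustrative only):
 * for a request of size 0x20 at phys_addr 0x1f0000804,
 *
 *	last_addr = 0x1f0000804 + 0x20 - 1              = 0x1f0000823
 *	offset    = 0x1f0000804 & ~PAGE_MASK            = 0x804
 *	phys_addr = 0x1f0000804 & PAGE_MASK             = 0x1f0000000
 *	size      = PAGE_ALIGN(0x1f0000824) - phys_addr = 0x1000
 *
 * so a single page is mapped and the caller gets back area->addr + 0x804,
 * exactly as the NOTE before __ioremap() promises.
 */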

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */

void __iomem *ioremap_nocache (unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}

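/*
 * Minimal usage sketch (hypothetical driver, not part of this file): the
 * BAR number and register offsets below are made up for illustration, and
 * pdev stands for the driver's struct pci_dev pointer.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	status = readl(regs + 0x10);	// read a (made-up) status register
 *	writel(0x1, regs + 0x14);	// poke a (made-up) control register
 *	iounmap(regs);
 *
 * As the kerneldoc above notes, every successful mapping must eventually
 * be released with iounmap().
 */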
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if (addr <= high_memory)
		return;
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
		addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk("iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if (p->flags >> 20)
		ioremap_change_attr(p->phys_addr, p->size, 0);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}