/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640K-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/proto.h>

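/*
 * Translate a kernel virtual address into a physical one. Works for
 * both the kernel text mapping (addresses above __START_KERNEL_map)
 * and the direct mapping (addresses above PAGE_OFFSET).
 */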
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

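/*
 * The legacy 640K-1MB VGA/BIOS hole. It is always covered by the
 * direct mapping, so __ioremap() and iounmap() below special-case it.
 */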
#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
					unsigned long flags)
{
	int err = 0;
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here, not a struct page, because the
		 * physical address may lie in a hole between nodes and so
		 * have no memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages,
					    __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}

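/*
 * A concrete (hypothetical) view of the conflict the helper above avoids:
 * if RAM at physical address P is ioremapped uncached while the kernel's
 * direct mapping still maps P cached, the CPU would see one physical page
 * under two conflicting cache attributes. ioremap_change_attr() rewrites
 * the direct-mapping PTEs so both views agree.
 */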
/*
 * Generic mapping function
 */

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area; it's always mapped.
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
			  | _PAGE_DIRTY | _PAGE_ACCESSED | flags);
	/*
	 * Mappings have to be page-aligned.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it.
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long) addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		/* The attribute change failed; forget the attribute
		   bits stashed above bit 20 before unmapping. */
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *) (offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
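/*
 * Usage sketch, not part of this file (dev_phys and dev_len are
 * hypothetical values for a device aperture): a caller that wants a
 * fully uncached mapping can pass the PCD and PWT attribute bits
 * explicitly; ioremap_nocache() below is the common shorthand for the
 * PCD-only case.
 *
 *	void __iomem *regs;
 *
 *	regs = __ioremap(dev_phys, dev_len, _PAGE_PCD | _PAGE_PWT);
 *	if (!regs)
 *		return -ENOMEM;
 */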

/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform-specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other MMIO helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * buses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful when control registers are in such an area and write
 * combining or read caching is not desirable.
 *
 * The returned mapping must be freed with iounmap().
 */

void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}
EXPORT_SYMBOL(ioremap_nocache);
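/*
 * Minimal driver-style sketch, assuming a PCI device (pdev and
 * MY_STATUS_REG are hypothetical). It shows the usual pairing of
 * ioremap_nocache() with the MMIO accessors and iounmap():
 *
 *	void __iomem *mmio;
 *	u32 status;
 *
 *	mmio = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!mmio)
 *		return -ENOMEM;
 *	status = readl(mmio + MY_STATUS_REG);	(hypothetical offset)
 *	iounmap(mmio);
 */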

/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	/* Direct-mapped and low ISA addresses were never really remapped */
	if ((void __force *)addr <= high_memory)
		return;
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);
	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel. Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it. cpa takes care of the
	 * direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/* Reset the direct mapping. Can block */
	if (p->flags >> 20)	/* attribute bits stashed by __ioremap() */
		ioremap_change_attr(p->phys_addr, p->size, 0);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);