/*
 * arch/x86_64/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995, 1996 Linus Torvalds
 */

#include <linux/vmalloc.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/io.h>

#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>
#include <asm/cacheflush.h>
#include <asm/proto.h>

/*
 * Convert a kernel virtual address to a physical address, covering both
 * the kernel text mapping (at __START_KERNEL_map) and the direct
 * mapping (at PAGE_OFFSET).
 */
unsigned long __phys_addr(unsigned long x)
{
	if (x >= __START_KERNEL_map)
		return x - __START_KERNEL_map + phys_base;
	return x - PAGE_OFFSET;
}
EXPORT_SYMBOL(__phys_addr);

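/*
 * Illustrative sketch, not part of the original file: __phys_addr()
 * distinguishes the two kernel virtual ranges on x86_64.  Addresses at
 * or above __START_KERNEL_map belong to the kernel text mapping and are
 * offset by phys_base (non-zero for relocated kernels); everything else
 * is assumed to be in the direct mapping at PAGE_OFFSET.  The function
 * below is a hypothetical example and is compiled out.
 */
#ifdef IOREMAP_DOC_EXAMPLES	/* never defined; example only */
static void __phys_addr_example(void)
{
	/* A kernel-text address: translated via __START_KERNEL_map. */
	unsigned long text_pa =
		__phys_addr((unsigned long)__phys_addr_example);

	/* A direct-mapping address: __va(0x100000) maps back to 1MB. */
	unsigned long dm_pa = __phys_addr((unsigned long)__va(0x100000));

	printk(KERN_DEBUG "text at %#lx, direct-map probe at %#lx\n",
	       text_pa, dm_pa);
}
#endif
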
#define ISA_START_ADDRESS	0xa0000
#define ISA_END_ADDRESS		0x100000

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
static int
ioremap_change_attr(unsigned long phys_addr, unsigned long size,
		    unsigned long flags)
{
	int err = 0;
	if (phys_addr + size - 1 < (end_pfn_map << PAGE_SHIFT)) {
		unsigned long npages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
		unsigned long vaddr = (unsigned long) __va(phys_addr);

		/*
		 * Must use an address here, not a struct page, because
		 * the physical address can be in a hole between nodes
		 * and thus have no memmap entry.
		 */
		err = change_page_attr_addr(vaddr, npages,
					    __pgprot(__PAGE_KERNEL|flags));
		if (!err)
			global_flush_tlb();
	}
	return err;
}

/*
 * Generic mapping function.
 *
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap(unsigned long phys_addr, unsigned long size,
			unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * Don't remap the low PCI/ISA area; it's always mapped.
	 */
	if (phys_addr >= ISA_START_ADDRESS && last_addr < ISA_END_ADDRESS)
		return (__force void __iomem *)phys_to_virt(phys_addr);

#ifdef CONFIG_FLATMEM
	/*
	 * Don't allow anybody to remap normal RAM that we're using.
	 */
	if (last_addr < virt_to_phys(high_memory)) {
		char *t_addr, *t_end;
		struct page *page;

		t_addr = __va(phys_addr);
		t_end = t_addr + (size - 1);

		for (page = virt_to_page(t_addr); page <= virt_to_page(t_end); page++)
			if (!PageReserved(page))
				return NULL;
	}
#endif

	pgprot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_GLOBAL
			  | _PAGE_DIRTY | _PAGE_ACCESSED | flags);

	/*
	 * Mappings have to be page-aligned.
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Ok, go for it.  Stash the caller's cache attribute flags in the
	 * upper bits of the vm area flags so iounmap() knows whether the
	 * direct mapping has to be reset.
	 */
	area = get_vm_area(size, VM_IOREMAP | (flags << 20));
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (ioremap_page_range((unsigned long)addr, (unsigned long)addr + size,
			       phys_addr, pgprot)) {
		remove_vm_area((void *)(PAGE_MASK & (unsigned long)addr));
		return NULL;
	}
	if (flags && ioremap_change_attr(phys_addr, size, flags) < 0) {
		/* Clear the stashed flags: the direct mapping was not changed. */
		area->flags &= 0xffffff;
		vunmap(addr);
		return NULL;
	}
	return (__force void __iomem *)(offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);

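/*
 * Illustrative sketch, not part of the original file: this sketch
 * assumes that plain ioremap() of this era is a wrapper in <asm/io.h>
 * calling __ioremap(phys_addr, size, 0), i.e. a cacheable mapping with
 * no extra attribute bits.  The hypothetical snippet below maps a
 * made-up expansion ROM cacheably; the EXAMPLE_ names are assumptions
 * for illustration and the code is compiled out.
 */
#ifdef IOREMAP_DOC_EXAMPLES	/* never defined; example only */
#define EXAMPLE_ROM_PHYS	0xfd000000UL	/* hypothetical ROM base */
#define EXAMPLE_ROM_SIZE	0x10000UL

static void __ioremap_example(void)
{
	void __iomem *rom;
	unsigned char sig0, sig1;

	/* Cacheable mapping (flags == 0): fine for a ROM we only read. */
	rom = __ioremap(EXAMPLE_ROM_PHYS, EXAMPLE_ROM_SIZE, 0);
	if (!rom)
		return;

	/* Option ROMs begin with the 0x55 0xaa signature. */
	sig0 = readb(rom);
	sig1 = readb(rom + 1);
	printk(KERN_DEBUG "ROM signature %02x %02x\n", sig0, sig1);

	iounmap(rom);
}
#endif
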
/**
 * ioremap_nocache - map bus memory into CPU space
 * @phys_addr: bus address of the memory
 * @size: size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncacheable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses; in particular, driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(unsigned long phys_addr, unsigned long size)
{
	return __ioremap(phys_addr, size, _PAGE_PCD);
}
EXPORT_SYMBOL(ioremap_nocache);

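/*
 * Illustrative sketch, not part of the original file: the usual driver
 * pattern for MMIO registers is ioremap_nocache() at probe time and a
 * single matching iounmap() at remove time.  Everything prefixed
 * EXAMPLE_ below is a made-up placeholder, not a real device, and the
 * code is compiled out.
 */
#ifdef IOREMAP_DOC_EXAMPLES	/* never defined; example only */
#define EXAMPLE_MMIO_PHYS	0xfe000000UL	/* hypothetical BAR */
#define EXAMPLE_MMIO_SIZE	0x1000UL
#define EXAMPLE_CTRL		0x00		/* hypothetical register */

static void __iomem *example_regs;

static int example_probe(void)
{
	example_regs = ioremap_nocache(EXAMPLE_MMIO_PHYS, EXAMPLE_MMIO_SIZE);
	if (!example_regs)
		return -ENOMEM;

	/* Uncached mapping, so this write reaches the device directly. */
	writel(0x1, example_regs + EXAMPLE_CTRL);
	return 0;
}

static void example_remove(void)
{
	iounmap(example_regs);	/* exactly one iounmap per mapping */
	example_regs = NULL;
}
#endif
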
/**
 * iounmap - free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if (addr <= high_memory)
		return;
	if (addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)(PAGE_MASK & (unsigned long __force)addr);

	/*
	 * Use the vm area unlocked, assuming the caller ensures there
	 * isn't another iounmap for the same address in parallel.  Reuse
	 * of the virtual address is prevented by leaving it in the
	 * global lists until we're done with it.  cpa takes care of the
	 * direct mappings.
	 */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	/*
	 * Reset the direct mapping if __ioremap stashed non-default
	 * cache attribute flags in the upper bits.  Can block.
	 */
	if (p->flags >> 20)
		ioremap_change_attr(p->phys_addr, p->size, 0);

	/* Finally remove it */
	o = remove_vm_area((void *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);