/*
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 */

#include <linux/bootmem.h>
#include <linux/init.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/mmiotrace.h>

#include <asm/cacheflush.h>
#include <asm/e820.h>
#include <asm/fixmap.h>
#include <asm/pgtable.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/pat.h>

#include "physaddr.h"

/*
 * Fix up the linear direct mapping of the kernel to avoid cache attribute
 * conflicts.
 */
int ioremap_change_attr(unsigned long vaddr, unsigned long size,
			unsigned long prot_val)
{
	unsigned long nrpages = size >> PAGE_SHIFT;
	int err;

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		err = _set_memory_uc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WC:
		err = _set_memory_wc(vaddr, nrpages);
		break;
	case _PAGE_CACHE_WB:
		err = _set_memory_wb(vaddr, nrpages);
		break;
	}

	return err;
}

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
static void __iomem *__ioremap_caller(resource_size_t phys_addr,
		unsigned long size, unsigned long prot_val, void *caller)
{
	unsigned long pfn, offset, vaddr;
	resource_size_t last_addr;
	const resource_size_t unaligned_phys_addr = phys_addr;
	const unsigned long unaligned_size = size;
	struct vm_struct *area;
	unsigned long new_prot_val;
	pgprot_t prot;
	int retval;
	void __iomem *ret_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	if (!phys_addr_valid(phys_addr)) {
		printk(KERN_WARNING "ioremap: invalid physical address %llx\n",
		       (unsigned long long)phys_addr);
		WARN_ON_ONCE(1);
		return NULL;
	}

	/*
	 * Don't remap the low PCI/ISA area, it's always mapped..
	 */
	if (is_ISA_range(phys_addr, last_addr))
		return (__force void __iomem *)phys_to_virt(phys_addr);

	/*
	 * Check if the request spans more than any BAR in the iomem resource
	 * tree.
	 */
	WARN_ONCE(iomem_map_sanity_check(phys_addr, size),
		  KERN_INFO "Info: mapping multiple BARs. Your kernel is fine.");

	/*
	 * Don't allow anybody to remap normal RAM that we're using..
	 */
	for (pfn = phys_addr >> PAGE_SHIFT;
				(pfn << PAGE_SHIFT) < (last_addr & PAGE_MASK);
				pfn++) {

		int is_ram = page_is_ram(pfn);

		if (is_ram && pfn_valid(pfn) && !PageReserved(pfn_to_page(pfn)))
			return NULL;
		WARN_ON_ONCE(is_ram);
	}

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr+1) - phys_addr;

	retval = reserve_memtype(phys_addr, (u64)phys_addr + size,
						prot_val, &new_prot_val);
	if (retval) {
		printk(KERN_ERR "ioremap reserve_memtype failed %d\n", retval);
		return NULL;
	}

	if (prot_val != new_prot_val) {
		if (!is_new_memtype_allowed(phys_addr, size,
					    prot_val, new_prot_val)) {
			printk(KERN_ERR
		"ioremap error for 0x%llx-0x%llx, requested 0x%lx, got 0x%lx\n",
				(unsigned long long)phys_addr,
				(unsigned long long)(phys_addr + size),
				prot_val, new_prot_val);
			goto err_free_memtype;
		}
		prot_val = new_prot_val;
	}

	switch (prot_val) {
	case _PAGE_CACHE_UC:
	default:
		prot = PAGE_KERNEL_IO_NOCACHE;
		break;
	case _PAGE_CACHE_UC_MINUS:
		prot = PAGE_KERNEL_IO_UC_MINUS;
		break;
	case _PAGE_CACHE_WC:
		prot = PAGE_KERNEL_IO_WC;
		break;
	case _PAGE_CACHE_WB:
		prot = PAGE_KERNEL_IO;
		break;
	}

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		goto err_free_memtype;
	area->phys_addr = phys_addr;
	vaddr = (unsigned long) area->addr;

	if (kernel_map_sync_memtype(phys_addr, size, prot_val))
		goto err_free_area;

	if (ioremap_page_range(vaddr, vaddr + size, phys_addr, prot))
		goto err_free_area;

	ret_addr = (void __iomem *) (vaddr + offset);
	mmiotrace_ioremap(unaligned_phys_addr, unaligned_size, ret_addr);

	return ret_addr;
err_free_area:
	free_vm_area(area);
err_free_memtype:
	free_memtype(phys_addr, phys_addr + size);
	return NULL;
}

/**
 * ioremap_nocache     -   map bus memory into CPU space
 * @phys_addr:    bus address of the memory
 * @size:      size of the resource to map
 *
 * ioremap_nocache performs a platform specific sequence of operations to
 * make bus memory CPU accessible via the readb/readw/readl/writeb/
 * writew/writel functions and the other mmio helpers. The returned
 * address is not guaranteed to be usable directly as a virtual
 * address.
 *
 * This version of ioremap ensures that the memory is marked uncachable
 * on the CPU as well as honouring existing caching rules from things like
 * the PCI bus. Note that there are other caches and buffers on many
 * busses. In particular driver authors should read up on PCI writes.
 *
 * It's useful if some control registers are in such an area and
 * write combining or read caching is not desirable.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_nocache(resource_size_t phys_addr, unsigned long size)
{
	/*
	 * Ideally, this should be:
	 *	pat_enabled ? _PAGE_CACHE_UC : _PAGE_CACHE_UC_MINUS;
	 *
	 * Till we fix all X drivers to use ioremap_wc(), we will use
	 * UC MINUS.
	 */
	unsigned long val = _PAGE_CACHE_UC_MINUS;

	return __ioremap_caller(phys_addr, size, val,
				__builtin_return_address(0));
}
EXPORT_SYMBOL(ioremap_nocache);
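
/*
 * Illustrative driver usage of ioremap_nocache() (a sketch, not part of
 * this file; the PCI device "pdev", BAR index 0 and register offset 0x10
 * are hypothetical):
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = ioremap_nocache(pci_resource_start(pdev, 0),
 *			       pci_resource_len(pdev, 0));
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + 0x10);
 *	status = readl(regs + 0x10);
 *	...
 *	iounmap(regs);
 */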

/**
 * ioremap_wc	-	map memory into CPU space write combined
 * @phys_addr:	bus address of the memory
 * @size:	size of the resource to map
 *
 * This version of ioremap ensures that the memory is marked write combining.
 * Write combining allows faster writes to some hardware devices.
 *
 * Must be freed with iounmap.
 */
void __iomem *ioremap_wc(resource_size_t phys_addr, unsigned long size)
{
	if (pat_enabled)
		return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WC,
					__builtin_return_address(0));
	else
		return ioremap_nocache(phys_addr, size);
}
EXPORT_SYMBOL(ioremap_wc);
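
/*
 * A write-combining mapping suits prefetchable, write-mostly regions such
 * as framebuffers. A sketch of a caller (the BAR index and the "fb_virt"
 * name below are hypothetical):
 *
 *	fb_virt = ioremap_wc(pci_resource_start(pdev, 1),
 *			     pci_resource_len(pdev, 1));
 */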
| 239 | |
Linus Torvalds | b9e76a0 | 2008-03-24 11:22:39 -0700 | [diff] [blame] | 240 | void __iomem *ioremap_cache(resource_size_t phys_addr, unsigned long size) |
Thomas Gleixner | 5f86815 | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 241 | { |
Christoph Lameter | 2301696 | 2008-04-28 02:12:42 -0700 | [diff] [blame] | 242 | return __ioremap_caller(phys_addr, size, _PAGE_CACHE_WB, |
| 243 | __builtin_return_address(0)); |
Thomas Gleixner | 5f86815 | 2008-01-30 13:34:06 +0100 | [diff] [blame] | 244 | } |
| 245 | EXPORT_SYMBOL(ioremap_cache); |
| 246 | |
Rik van Riel | 28b2ee2 | 2008-07-23 21:27:05 -0700 | [diff] [blame] | 247 | void __iomem *ioremap_prot(resource_size_t phys_addr, unsigned long size, |
| 248 | unsigned long prot_val) |
| 249 | { |
| 250 | return __ioremap_caller(phys_addr, size, (prot_val & _PAGE_CACHE_MASK), |
| 251 | __builtin_return_address(0)); |
| 252 | } |
| 253 | EXPORT_SYMBOL(ioremap_prot); |
| 254 | |
/**
 * iounmap - Free an IO remapping
 * @addr: virtual address from ioremap_*
 *
 * Caller must ensure there is only one unmapping for the same pointer.
 */
void iounmap(volatile void __iomem *addr)
{
	struct vm_struct *p, *o;

	if ((void __force *)addr <= high_memory)
		return;

	/*
	 * __ioremap special-cases the PCI/ISA range by not instantiating a
	 * vm_area and by simply returning an address into the kernel mapping
	 * of ISA space. So handle that here.
	 */
	if ((void __force *)addr >= phys_to_virt(ISA_START_ADDRESS) &&
	    (void __force *)addr < phys_to_virt(ISA_END_ADDRESS))
		return;

	addr = (volatile void __iomem *)
		(PAGE_MASK & (unsigned long __force)addr);

	mmiotrace_iounmap(addr);

	/* Use the vm area unlocked, assuming the caller
	   ensures there isn't another iounmap for the same address
	   in parallel. Reuse of the virtual address is prevented by
	   leaving it in the global lists until we're done with it.
	   cpa takes care of the direct mappings. */
	read_lock(&vmlist_lock);
	for (p = vmlist; p; p = p->next) {
		if (p->addr == (void __force *)addr)
			break;
	}
	read_unlock(&vmlist_lock);

	if (!p) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		dump_stack();
		return;
	}

	free_memtype(p->phys_addr, p->phys_addr + get_vm_area_size(p));

	/* Finally remove it */
	o = remove_vm_area((void __force *)addr);
	BUG_ON(p != o || o == NULL);
	kfree(p);
}
EXPORT_SYMBOL(iounmap);
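
/*
 * Each successful ioremap_*() call is expected to be balanced by exactly
 * one iounmap() on the returned cookie, typically in the driver's error
 * path and remove routine. A sketch (the "priv" structure and label are
 * illustrative only):
 *
 *	priv->regs = ioremap_nocache(res->start, resource_size(res));
 *	if (!priv->regs)
 *		goto err_release;
 *	...
 *	iounmap(priv->regs);
 */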

/*
 * Convert a physical pointer to a virtual kernel pointer for /dev/mem
 * access
 */
void *xlate_dev_mem_ptr(unsigned long phys)
{
	void *addr;
	unsigned long start = phys & PAGE_MASK;

	/* If page is RAM, we can use __va. Otherwise ioremap and unmap. */
	if (page_is_ram(start >> PAGE_SHIFT))
		return __va(phys);

	addr = (void __force *)ioremap_cache(start, PAGE_SIZE);
	if (addr)
		addr = (void *)((unsigned long)addr | (phys & ~PAGE_MASK));

	return addr;
}

void unxlate_dev_mem_ptr(unsigned long phys, void *addr)
{
	if (page_is_ram(phys >> PAGE_SHIFT))
		return;

	iounmap((void __iomem *)((unsigned long)addr & PAGE_MASK));
	return;
}
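
/*
 * These two helpers are paired up by the /dev/mem read/write paths,
 * roughly as follows (a sketch of the caller, not part of this file):
 *
 *	ptr = xlate_dev_mem_ptr(p);
 *	if (copy_to_user(buf, ptr, sz))
 *		err = -EFAULT;
 *	unxlate_dev_mem_ptr(p, ptr);
 */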

static int __initdata early_ioremap_debug;

static int __init early_ioremap_debug_setup(char *str)
{
	early_ioremap_debug = 1;

	return 0;
}
early_param("early_ioremap_debug", early_ioremap_debug_setup);

static __initdata int after_paging_init;
static pte_t bm_pte[PAGE_SIZE/sizeof(pte_t)] __page_aligned_bss;

static inline pmd_t * __init early_ioremap_pmd(unsigned long addr)
{
	/* Don't assume we're using swapper_pg_dir at this point */
	pgd_t *base = __va(read_cr3());
	pgd_t *pgd = &base[pgd_index(addr)];
	pud_t *pud = pud_offset(pgd, addr);
	pmd_t *pmd = pmd_offset(pud, addr);

	return pmd;
}

static inline pte_t * __init early_ioremap_pte(unsigned long addr)
{
	return &bm_pte[pte_index(addr)];
}

static unsigned long slot_virt[FIX_BTMAPS_SLOTS] __initdata;

void __init early_ioremap_init(void)
{
	pmd_t *pmd;
	int i;

	if (early_ioremap_debug)
		printk(KERN_INFO "early_ioremap_init()\n");

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		slot_virt[i] = __fix_to_virt(FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*i);

	pmd = early_ioremap_pmd(fix_to_virt(FIX_BTMAP_BEGIN));
	memset(bm_pte, 0, sizeof(bm_pte));
	pmd_populate_kernel(&init_mm, pmd, bm_pte);

	/*
	 * The boot-ioremap range spans multiple pmds, for which
	 * we are not prepared:
	 */
#define __FIXADDR_TOP (-PAGE_SIZE)
	BUILD_BUG_ON((__fix_to_virt(FIX_BTMAP_BEGIN) >> PMD_SHIFT)
		     != (__fix_to_virt(FIX_BTMAP_END) >> PMD_SHIFT));
#undef __FIXADDR_TOP
	if (pmd != early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END))) {
		WARN_ON(1);
		printk(KERN_WARNING "pmd %p != %p\n",
		       pmd, early_ioremap_pmd(fix_to_virt(FIX_BTMAP_END)));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_BEGIN): %08lx\n",
			fix_to_virt(FIX_BTMAP_BEGIN));
		printk(KERN_WARNING "fix_to_virt(FIX_BTMAP_END):   %08lx\n",
			fix_to_virt(FIX_BTMAP_END));

		printk(KERN_WARNING "FIX_BTMAP_END:       %d\n", FIX_BTMAP_END);
		printk(KERN_WARNING "FIX_BTMAP_BEGIN:     %d\n",
			FIX_BTMAP_BEGIN);
	}
}

void __init early_ioremap_reset(void)
{
	after_paging_init = 1;
}

static void __init __early_set_fixmap(enum fixed_addresses idx,
				      phys_addr_t phys, pgprot_t flags)
{
	unsigned long addr = __fix_to_virt(idx);
	pte_t *pte;

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	pte = early_ioremap_pte(addr);

	if (pgprot_val(flags))
		set_pte(pte, pfn_pte(phys >> PAGE_SHIFT, flags));
	else
		pte_clear(&init_mm, addr, pte);
	__flush_tlb_one(addr);
}

static inline void __init early_set_fixmap(enum fixed_addresses idx,
					   phys_addr_t phys, pgprot_t prot)
{
	if (after_paging_init)
		__set_fixmap(idx, phys, prot);
	else
		__early_set_fixmap(idx, phys, prot);
}

static inline void __init early_clear_fixmap(enum fixed_addresses idx)
{
	if (after_paging_init)
		clear_fixmap(idx);
	else
		__early_set_fixmap(idx, 0, __pgprot(0));
}

static void __iomem *prev_map[FIX_BTMAPS_SLOTS] __initdata;
static unsigned long prev_size[FIX_BTMAPS_SLOTS] __initdata;

void __init fixup_early_ioremap(void)
{
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i]) {
			WARN_ON(1);
			break;
		}
	}

	early_ioremap_init();
}

static int __init check_early_ioremap_leak(void)
{
	int count = 0;
	int i;

	for (i = 0; i < FIX_BTMAPS_SLOTS; i++)
		if (prev_map[i])
			count++;

	if (!count)
		return 0;
	WARN(1, KERN_WARNING
	     "Debug warning: early ioremap leak of %d areas detected.\n",
	     count);
	printk(KERN_WARNING
	       "please boot with early_ioremap_debug and report the dmesg.\n");

	return 1;
}
late_initcall(check_early_ioremap_leak);

static void __init __iomem *
__early_ioremap(resource_size_t phys_addr, unsigned long size, pgprot_t prot)
{
	unsigned long offset;
	resource_size_t last_addr;
	unsigned int nrpages;
	enum fixed_addresses idx0, idx;
	int i, slot;

	WARN_ON(system_state != SYSTEM_BOOTING);

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (!prev_map[i]) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iomap(%08llx, %08lx) not found slot\n",
			 (u64)phys_addr, size);
		WARN_ON(1);
		return NULL;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_ioremap(%08llx, %08lx) [%d] => ",
		       (u64)phys_addr, size, slot);
		dump_stack();
	}

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr) {
		WARN_ON(1);
		return NULL;
	}

	prev_size[slot] = size;
	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	/*
	 * Mappings have to fit in the FIX_BTMAP area.
	 */
	nrpages = size >> PAGE_SHIFT;
	if (nrpages > NR_FIX_BTMAPS) {
		WARN_ON(1);
		return NULL;
	}

	/*
	 * Ok, go for it..
	 */
	idx0 = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	idx = idx0;
	while (nrpages > 0) {
		early_set_fixmap(idx, phys_addr, prot);
		phys_addr += PAGE_SIZE;
		--idx;
		--nrpages;
	}
	if (early_ioremap_debug)
		printk(KERN_CONT "%08lx + %08lx\n", offset, slot_virt[slot]);

	prev_map[slot] = (void __iomem *)(offset + slot_virt[slot]);
	return prev_map[slot];
}

/* Remap an IO device */
void __init __iomem *
early_ioremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL_IO);
}

/* Remap memory */
void __init __iomem *
early_memremap(resource_size_t phys_addr, unsigned long size)
{
	return __early_ioremap(phys_addr, size, PAGE_KERNEL);
}
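
/*
 * Boot-time callers use these before the normal ioremap()/vmalloc machinery
 * is up, e.g. to peek at a firmware table. A sketch of such a caller (the
 * "table_phys" address and "hdr" structure are hypothetical, and the cast
 * from the __iomem return value is omitted for brevity):
 *
 *	hdr = early_memremap(table_phys, sizeof(*hdr));
 *	if (hdr)
 *		len = hdr->length;
 *	early_iounmap(hdr, sizeof(*hdr));
 */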

void __init early_iounmap(void __iomem *addr, unsigned long size)
{
	unsigned long virt_addr;
	unsigned long offset;
	unsigned int nrpages;
	enum fixed_addresses idx;
	int i, slot;

	slot = -1;
	for (i = 0; i < FIX_BTMAPS_SLOTS; i++) {
		if (prev_map[i] == addr) {
			slot = i;
			break;
		}
	}

	if (slot < 0) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) not found slot\n",
			 addr, size);
		WARN_ON(1);
		return;
	}

	if (prev_size[slot] != size) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d] size not consistent %08lx\n",
			 addr, size, slot, prev_size[slot]);
		WARN_ON(1);
		return;
	}

	if (early_ioremap_debug) {
		printk(KERN_INFO "early_iounmap(%p, %08lx) [%d]\n", addr,
		       size, slot);
		dump_stack();
	}

	virt_addr = (unsigned long)addr;
	if (virt_addr < fix_to_virt(FIX_BTMAP_BEGIN)) {
		WARN_ON(1);
		return;
	}
	offset = virt_addr & ~PAGE_MASK;
	nrpages = PAGE_ALIGN(offset + size - 1) >> PAGE_SHIFT;

	idx = FIX_BTMAP_BEGIN - NR_FIX_BTMAPS*slot;
	while (nrpages > 0) {
		early_clear_fixmap(idx);
		--idx;
		--nrpages;
	}
	prev_map[slot] = NULL;
}