/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PCs.
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
			       unsigned long flags, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we're in the fixed PCI memory range, mapping through page
	 * tables is not only pointless, but also fundamentally broken.
	 * Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memory_fixed_range(phys_addr, size))
		return (void __iomem *)phys_addr;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
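	/*
	 * Worked example (assuming 4 KiB pages): phys_addr = 0x10000004
	 * with size = 0x10 yields offset = 0x4, a page-aligned phys_addr
	 * of 0x10000000, and a size of one full page. The offset is
	 * added back into the returned virtual address at the end.
	 */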

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_PMB
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
	if (unlikely(phys_addr >= P1SEG)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr += mapped;
			phys_addr += mapped;
			size -= mapped;
		}
	}
#endif

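	/*
	 * Map whatever remains (everything, if no PMB mapping was
	 * established above) through conventional page tables.
	 */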
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
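
/*
 * Illustrative sketch only (the real wrappers live in <asm/io.h> and may
 * differ in detail): the public ioremap() entry points are expected to
 * funnel through __ioremap_caller(), recording the call site for
 * /proc/vmallocinfo, along the lines of:
 *
 *	static inline void __iomem *
 *	__ioremap(unsigned long offset, unsigned long size, unsigned long flags)
 *	{
 *		return __ioremap_caller(offset, size, flags,
 *					__builtin_return_address(0));
 *	}
 */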

/*
 * Simple checks for non-translatable mappings.
 */
static inline int iomapping_nontranslatable(unsigned long offset)
{
#ifdef CONFIG_29BIT
	/*
	 * In 29-bit mode this includes the fixed P1/P2 areas, as well as
	 * parts of P3.
	 */
	if (PXSEG(offset) < P3SEG || offset >= P3_ADDR_MAX)
		return 1;
#endif

	if (is_pci_memory_fixed_range(offset, 0))
		return 1;

	return 0;
}
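
/*
 * For reference (assuming the usual SH-3/SH-4 segment layout): P1
 * (0x80000000, cached) and P2 (0xA0000000, uncached) are fixed identity
 * windows onto physical memory, while P3 (0xC0000000) is the
 * page-table-translated region that get_vm_area_caller() allocates
 * from. Addresses outside the translatable range thus have no VMA or
 * PTEs to tear down, which is what the early return below relies on.
 */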

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/*
	 * Nothing to do if there is no translatable mapping.
	 */
	if (iomapping_nontranslatable(vaddr))
		return;

#ifdef CONFIG_PMB
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 * -- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);
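
/*
 * Example pairing from a driver's perspective (sketch only; the base
 * address, size, and register offset below are made up for illustration):
 *
 *	void __iomem *regs = ioremap(0xfe200000, 0x100);
 *
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	iounmap(regs);
 */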