/*
 * arch/sh/mm/ioremap.c
 *
 * Re-map IO memory to kernel address space so that we can access it.
 * This is needed for high PCI addresses that aren't mapped in the
 * 640k-1MB IO memory area on PC's
 *
 * (C) Copyright 1995 1996 Linus Torvalds
 * (C) Copyright 2005, 2006 Paul Mundt
 *
 * This file is subject to the terms and conditions of the GNU General
 * Public License. See the file "COPYING" in the main directory of this
 * archive for more details.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/pci.h>
#include <linux/io.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/addrspace.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/mmu.h>

/*
 * Remap an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access high addresses
 * directly.
 *
 * NOTE! We need to allow non-page-aligned mappings too: we will obviously
 * have to convert them into an offset in a page-aligned mapping, but the
 * caller shouldn't need to know that small detail.
 */
void __iomem *__ioremap_caller(unsigned long phys_addr, unsigned long size,
			       unsigned long flags, void *caller)
{
	struct vm_struct *area;
	unsigned long offset, last_addr, addr, orig_addr;
	pgprot_t pgprot;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * If we're in the fixed PCI memory range, mapping through page
	 * tables is not only pointless, but also fundamentally broken.
	 * Just return the physical address instead.
	 *
	 * For boards that map a small PCI memory aperture somewhere in
	 * P1/P2 space, ioremap() will already do the right thing,
	 * and we'll never get this far.
	 */
	if (is_pci_memory_fixed_range(phys_addr, size))
		return (void __iomem *)phys_addr;

	/*
	 * Mappings have to be page-aligned
	 */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;
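	/*
	 * Worked example (illustrative values only): a request for
	 * phys_addr = 0x14000013, size = 0x20 yields offset = 0x13,
	 * phys_addr rounded down to 0x14000000, and size rounded up
	 * to 0x1000 (one full page, assuming 4KiB pages), so the whole
	 * requested range is covered and the caller's sub-page offset
	 * is re-applied to the returned pointer at the end.
	 */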

	/*
	 * Ok, go for it..
	 */
	area = get_vm_area_caller(size, VM_IOREMAP, caller);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	orig_addr = addr = (unsigned long)area->addr;

#ifdef CONFIG_PMB
	/*
	 * First try to remap through the PMB once a valid VMA has been
	 * established. Smaller allocations (or the rest of the size
	 * remaining after a PMB mapping due to the size not being
	 * perfectly aligned on a PMB size boundary) are then mapped
	 * through the UTLB using conventional page tables.
	 *
	 * PMB entries are all pre-faulted.
	 */
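	/*
	 * Illustrative example (the entry sizes here assume an SH-4A
	 * style PMB): a 17MB request can be satisfied by a single 16MB
	 * PMB entry, leaving mapped == 16MB; the remaining 1MB then
	 * falls through to the ioremap_page_range() path below.
	 */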
	if (unlikely(phys_addr >= P1SEG)) {
		unsigned long mapped = pmb_remap(addr, phys_addr, size, flags);

		if (likely(mapped)) {
			addr		+= mapped;
			phys_addr	+= mapped;
			size		-= mapped;
		}
	}
#endif

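	/*
	 * Whatever is left over (everything, in the non-PMB case) goes
	 * through conventional page tables: always uncached, with any
	 * caller-supplied flags OR'd on top.
	 */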
	pgprot = __pgprot(pgprot_val(PAGE_KERNEL_NOCACHE) | flags);
	if (likely(size))
		if (ioremap_page_range(addr, addr + size, phys_addr, pgprot)) {
			vunmap((void *)orig_addr);
			return NULL;
		}

	return (void __iomem *)(offset + (char *)orig_addr);
}
EXPORT_SYMBOL(__ioremap_caller);
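
/*
 * Typical use, for reference (the register base, size, and REG_CTRL
 * offset below are made-up values): drivers normally reach this code
 * through the ioremap() family of wrappers, which pass their call site
 * down as 'caller' for /proc/vmallocinfo attribution.
 *
 *	void __iomem *regs = ioremap(0xfe600000, 0x100);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs + REG_CTRL);
 *	...
 *	iounmap(regs);
 */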

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	unsigned long seg = PXSEG(vaddr);
	struct vm_struct *p;

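	/*
	 * P1/P2 translations and fixed PCI window addresses were handed
	 * out without a VMA or page table entries behind them, so there
	 * is nothing to tear down; just bail out.
	 */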
	if (seg < P3SEG || vaddr >= P3_ADDR_MAX)
		return;
	if (is_pci_memory_fixed_range(vaddr, 0))
		return;

#ifdef CONFIG_PMB
	/*
	 * Purge any PMB entries that may have been established for this
	 * mapping, then proceed with conventional VMA teardown.
	 *
	 * XXX: Note that due to the way that remove_vm_area() does
	 * matching of the resultant VMA, we aren't able to fast-forward
	 * the address past the PMB space until the end of the VMA where
	 * the page tables reside. As such, unmap_vm_area() will be
	 * forced to linearly scan over the area until it finds the page
	 * tables where PTEs that need to be unmapped actually reside,
	 * which is far from optimal. Perhaps we need to use a separate
	 * VMA for the PMB mappings?
	 * -- PFM.
	 */
	pmb_unmap(vaddr);
#endif

	p = remove_vm_area((void *)(vaddr & PAGE_MASK));
	if (!p) {
		printk(KERN_ERR "%s: bad address %p\n", __func__, addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);