/*
 * Copyright (C) 2004-2006 Atmel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/vmalloc.h>
#include <linux/module.h>

#include <asm/io.h>
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/addrspace.h>

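/*
 * The remap_area_{pte,pmd,pud,pages}() helpers below walk the kernel
 * page tables top-down (pgd -> pud -> pmd -> pte), allocating any
 * missing intermediate tables and filling in the leaf PTEs that map
 * the requested physical range.
 */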
static inline int remap_area_pte(pte_t *pte, unsigned long address,
				 unsigned long end, unsigned long phys_addr,
				 pgprot_t prot)
{
	unsigned long pfn;

	pfn = phys_addr >> PAGE_SHIFT;
	do {
		WARN_ON(!pte_none(*pte));

		set_pte(pte, pfn_pte(pfn, prot));
		address += PAGE_SIZE;
		pfn++;
		pte++;
	} while (address && (address < end));

	return 0;
}

static inline int remap_area_pmd(pmd_t *pmd, unsigned long address,
				 unsigned long end, unsigned long phys_addr,
				 pgprot_t prot)
{
	unsigned long next;

	/*
	 * Keep the constant (phys - virt) delta; adding the current
	 * virtual address back yields the physical address below.
	 */
	phys_addr -= address;

	do {
		pte_t *pte = pte_alloc_kernel(pmd, address);
		if (!pte)
			return -ENOMEM;

		next = (address + PMD_SIZE) & PMD_MASK;
		if (next < address || next > end)
			next = end;
		if (remap_area_pte(pte, address, next,
				   address + phys_addr, prot))
			return -ENOMEM;

		address = next;
		pmd++;
	} while (address && (address < end));

	return 0;
}

static int remap_area_pud(pud_t *pud, unsigned long address,
			  unsigned long end, unsigned long phys_addr,
			  pgprot_t prot)
{
	unsigned long next;

	phys_addr -= address;

	do {
		pmd_t *pmd = pmd_alloc(&init_mm, pud, address);
		if (!pmd)
			return -ENOMEM;

		next = (address + PUD_SIZE) & PUD_MASK;
		if (next < address || next > end)
			next = end;
		if (remap_area_pmd(pmd, address, next,
				   phys_addr + address, prot))
			return -ENOMEM;

		address = next;
		pud++;
	} while (address && (address < end));

	return 0;
}

static int remap_area_pages(unsigned long address, unsigned long phys_addr,
			    size_t size, pgprot_t prot)
{
	unsigned long end = address + size;
	unsigned long next;
	pgd_t *pgd;
	int err = 0;

	phys_addr -= address;

	pgd = pgd_offset_k(address);
	flush_cache_all();
	BUG_ON(address >= end);

	spin_lock(&init_mm.page_table_lock);
	do {
		pud_t *pud = pud_alloc(&init_mm, pgd, address);

		err = -ENOMEM;
		if (!pud)
			break;

		next = (address + PGDIR_SIZE) & PGDIR_MASK;
		if (next < address || next > end)
			next = end;
		err = remap_area_pud(pud, address, next,
				     phys_addr + address, prot);
		if (err)
			break;

		address = next;
		pgd++;
	} while (address && (address < end));

	spin_unlock(&init_mm.page_table_lock);
	flush_tlb_all();
	return err;
}

/*
 * Re-map an arbitrary physical address space into the kernel virtual
 * address space. Needed when the kernel wants to access physical
 * memory directly.
 */
void __iomem *__ioremap(unsigned long phys_addr, size_t size,
			unsigned long flags)
{
	void *addr;
	struct vm_struct *area;
	unsigned long offset, last_addr;
	pgprot_t prot;

	/*
	 * Check if we can simply use the P4 segment. This area is
	 * uncacheable, so if caching/buffering is requested, we can't
	 * use it.
	 */
	if ((phys_addr >= P4SEG) && (flags == 0))
		return (void __iomem *)phys_addr;

	/* Don't allow wraparound or zero size */
	last_addr = phys_addr + size - 1;
	if (!size || last_addr < phys_addr)
		return NULL;

	/*
	 * XXX: When mapping regular RAM, we'd better make damn sure
	 * it's never used for anything else. But this is really the
	 * caller's responsibility...
	 */
	if (PHYSADDR(P2SEGADDR(phys_addr)) == phys_addr)
		return (void __iomem *)P2SEGADDR(phys_addr);

	/* Mappings have to be page-aligned */
	offset = phys_addr & ~PAGE_MASK;
	phys_addr &= PAGE_MASK;
	size = PAGE_ALIGN(last_addr + 1) - phys_addr;

	prot = __pgprot(_PAGE_PRESENT | _PAGE_RW | _PAGE_DIRTY
			| _PAGE_ACCESSED | _PAGE_TYPE_SMALL | flags);

	/*
	 * Allocate a virtual address range in the vmalloc area and
	 * map the pages into it.
	 */
	area = get_vm_area(size, VM_IOREMAP);
	if (!area)
		return NULL;
	area->phys_addr = phys_addr;
	addr = area->addr;
	if (remap_area_pages((unsigned long)addr, phys_addr, size, prot)) {
		vunmap(addr);
		return NULL;
	}

	return (void __iomem *)(offset + (char *)addr);
}
EXPORT_SYMBOL(__ioremap);
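
/*
 * Usage sketch (illustrative only, not part of the original source):
 * a driver would typically map a device register window once and then
 * use the normal MMIO accessors on the returned cookie. The physical
 * address, size and register offsets are made-up placeholders.
 *
 *	void __iomem *regs;
 *	u32 status;
 *
 *	regs = __ioremap(0x12340000, 0x1000, 0);
 *	if (!regs)
 *		return -ENOMEM;
 *	writel(1, regs);		(hypothetical control register)
 *	status = readl(regs + 4);	(hypothetical status register)
 *	__iounmap(regs);
 *
 * Depending on the physical address and flags, the returned cookie may
 * be a P2 or P4 segment address or a fresh vmalloc-area mapping; the
 * caller treats it the same either way.
 */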

void __iounmap(void __iomem *addr)
{
	unsigned long vaddr = (unsigned long __force)addr;
	struct vm_struct *p;

	/* P4 addresses were handed out by __ioremap() without mapping */
	if (vaddr >= P4SEG)
		return;

	/*
	 * Likewise for P2 segment cookies from the identity-mapping
	 * fast path; there is no vm_struct to tear down for those.
	 */
	if (P2SEGADDR(PHYSADDR(vaddr)) == vaddr)
		return;

	p = remove_vm_area((void *)(PAGE_MASK & vaddr));
	if (unlikely(!p)) {
		printk(KERN_ERR "iounmap: bad address %p\n", addr);
		return;
	}

	kfree(p);
}
EXPORT_SYMBOL(__iounmap);