/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	September 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/interrupt.h>
#include <asm/fixmap.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include "mm.h"

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
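
/*
 * Illustrative sketch, not part of the original file: a typical caller
 * pairs kmap() with kunmap() in process context, since the mapping may
 * sleep.  The helper name is hypothetical and the block is compiled out.
 */
#if 0
static void example_zero_page(struct page *page)
{
	void *addr = kmap(page);	/* may sleep on a highmem page */

	memset(addr, 0, PAGE_SIZE);
	kunmap(page);
}
#endif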

void *kmap_atomic(struct page *page, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when the cache is not VIVT,
	 * so force dedicated kmap usage in that case for better debugging.
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
		kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), mk_pte(page, kmap_prot), 0);
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so this TLB flush ensures the TLB is updated with the
	 * new mapping.
	 */
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	unsigned int idx = type + KM_TYPE_NR * smp_processor_id();

	if (kvaddr >= (void *)FIXADDR_START) {
		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_pte_ext(TOP_PTE(vaddr), __pte(0), 0);
		local_flush_tlb_kernel_page(vaddr);
#else
		(void) idx;  /* to kill a warning */
#endif
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(kunmap_atomic);
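
/*
 * Illustrative sketch, not part of the original file: an atomic mapping
 * never sleeps (pagefaults, and hence preemption, stay disabled between
 * map and unmap), and the caller supplies a km_type slot such as
 * KM_USER0.  The helper name is hypothetical and the block is compiled
 * out.
 */
#if 0
static void example_clear_page_atomic(struct page *page)
{
	void *addr = kmap_atomic(page, KM_USER0);	/* never sleeps */

	clear_page(addr);
	kunmap_atomic(addr, KM_USER0);
}
#endif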

void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	unsigned int idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(TOP_PTE(vaddr))));
#endif
	set_pte_ext(TOP_PTE(vaddr), pfn_pte(pfn, kmap_prot), 0);
	local_flush_tlb_kernel_page(vaddr);

	return (void *)vaddr;
}
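
/*
 * Illustrative sketch, not part of the original file: kmap_atomic_pfn()
 * serves callers that only know a page frame number and have no struct
 * page; the mapping is torn down with the regular kunmap_atomic().  The
 * helper name is hypothetical and the block is compiled out.
 */
#if 0
static u32 example_peek_pfn(unsigned long pfn)
{
	u32 *p = kmap_atomic_pfn(pfn, KM_USER0);
	u32 val = *p;

	kunmap_atomic(p, KM_USER0);
	return val;
}
#endif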

/*
 * Translate an address obtained from kmap_atomic() back to its
 * struct page.  Lowmem addresses are handled by virt_to_page().
 */
struct page *kmap_atomic_to_page(const void *ptr)
{
	unsigned long vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = TOP_PTE(vaddr);
	return pte_page(*pte);
}

#ifdef CONFIG_CPU_CACHE_VIPT

#include <linux/percpu.h>

/*
 * The VIVT cache of a highmem page is always flushed before the page
 * is unmapped.  Hence unmapped highmem pages need no cache maintenance
 * in that case.
 *
 * However, unmapped pages may still be cached with a VIPT cache, and
 * it is unfortunately not possible to perform cache maintenance on
 * them using physical addresses.  So we have no choice but to set up
 * a temporary virtual mapping for that purpose.
 *
 * Yet this VIPT cache maintenance may be triggered from DMA support
 * functions which are possibly called from interrupt context.  As we
 * don't want to keep interrupts disabled all the time when such
 * maintenance is taking place, we allow for some reentrancy by
 * preserving and restoring the previous fixmap entry before the
 * interrupted context is resumed.  If the reentrancy depth is 0 then
 * there is no need to restore the previous fixmap, and leaving the
 * current one in place allows it to be reused the next time without
 * a TLB flush (common with DMA).
 */

static DEFINE_PER_CPU(int, kmap_high_l1_vipt_depth);

void *kmap_high_l1_vipt(struct page *page, pte_t *saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	if (!in_interrupt())
		preempt_disable();

	raw_local_irq_save(flags);
	(*depth)++;
	if (pte_val(*ptep) == pte_val(pte)) {
		*saved_pte = pte;
	} else {
		*saved_pte = *ptep;
		set_pte_ext(ptep, pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	return (void *)vaddr;
}

void kunmap_high_l1_vipt(struct page *page, pte_t saved_pte)
{
	unsigned int idx, cpu = smp_processor_id();
	int *depth = &per_cpu(kmap_high_l1_vipt_depth, cpu);
	unsigned long vaddr, flags;
	pte_t pte, *ptep;

	idx = KM_L1_CACHE + KM_TYPE_NR * cpu;
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	ptep = TOP_PTE(vaddr);
	pte = mk_pte(page, kmap_prot);

	BUG_ON(pte_val(*ptep) != pte_val(pte));
	BUG_ON(*depth <= 0);

	raw_local_irq_save(flags);
	(*depth)--;
	if (*depth != 0 && pte_val(pte) != pte_val(saved_pte)) {
		set_pte_ext(ptep, saved_pte, 0);
		local_flush_tlb_kernel_page(vaddr);
	}
	raw_local_irq_restore(flags);

	if (!in_interrupt())
		preempt_enable();
}
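
/*
 * Illustrative sketch, not part of the original file: a cache flushing
 * routine would bracket its work with the pair above, passing saved_pte
 * back on teardown so a preempted mapping (see the reentrancy comment
 * above) can be restored.  The helper name is hypothetical and the
 * block is compiled out.
 */
#if 0
static void example_flush_highmem_page(struct page *page)
{
	pte_t saved_pte;
	void *addr = kmap_high_l1_vipt(page, &saved_pte);

	__cpuc_flush_dcache_area(addr, PAGE_SIZE);
	kunmap_high_l1_vipt(page, saved_pte);
}
#endif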

#endif	/* CONFIG_CPU_CACHE_VIPT */