/*
 * Copyright 2010 Tilera Corporation. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation, version 2.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT.  See the GNU General Public License for
 * more details.
 */

#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/pagemap.h>
#include <asm/homecache.h>

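/*
 * Walk the init_mm page tables to find the (level-3) kernel PTE that
 * maps a given kernel virtual address in the fixmap/kmap range.
 */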
#define kmap_get_pte(vaddr) \
	pte_offset_kernel(pmd_offset(pud_offset(pgd_offset_k(vaddr), (vaddr)),\
		(vaddr)), (vaddr))

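/*
 * Map a highmem page with kmap_high(), then rewrite the kernel PTE
 * under the homecache kpte lock so the mapping reflects the page's
 * current homecache state and cannot race with a page migration.
 */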
void *kmap(struct page *page)
{
	void *kva;
	unsigned long flags;
	pte_t *ptep;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	kva = kmap_high(page);

	/*
	 * Rewrite the PTE under the lock.  This ensures that the page
	 * is not currently migrating.
	 */
	ptep = kmap_get_pte((unsigned long)kva);
	flags = homecache_kpte_lock();
	set_pte_at(&init_mm, kva, ptep, mk_pte(page, page_to_kpgprot(page)));
	homecache_kpte_unlock(flags);

	return kva;
}
EXPORT_SYMBOL(kmap);

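/*
 * Drop a kmap() mapping.  Must not be called from interrupt context;
 * only highmem pages have any state to undo.
 */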
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * Describe a single atomic mapping of a page on a given cpu at a
 * given address, and allow it to be linked into a list.
 */
struct atomic_mapped_page {
	struct list_head list;
	struct page *page;
	int cpu;
	unsigned long va;
};

static spinlock_t amp_lock = __SPIN_LOCK_UNLOCKED(&amp_lock);
static struct list_head amp_list = LIST_HEAD_INIT(amp_list);

/*
 * Combining this structure with a per-cpu declaration lets us give
 * each cpu an atomic_mapped_page structure per type.
 */
struct kmap_amps {
	struct atomic_mapped_page per_type[KM_TYPE_NR];
};
static DEFINE_PER_CPU(struct kmap_amps, amps);

/*
 * Add a page and va, on this cpu, to the list of kmap_atomic pages,
 * and write the new pte to memory.  Writing the new PTE under the
 * lock guarantees that it is either on the list before migration starts
 * (if we won the race), or set_pte() sets the migrating bit in the PTE
 * (if we lost the race).  And doing it under the lock guarantees
 * that when kmap_atomic_fix_one_kpte() comes along, it finds a valid
 * PTE in memory, iff the mapping is still on the amp_list.
 *
 * Finally, doing it under the lock lets us safely examine the page
 * to see if it is immutable or not, for the generic kmap_atomic() case.
 * If we examine it earlier we are exposed to a race where it looks
 * writable earlier, but becomes immutable before we write the PTE.
 */
static void kmap_atomic_register(struct page *page, enum km_type type,
				 unsigned long va, pte_t *ptep, pte_t pteval)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;

	flags = homecache_kpte_lock();
	spin_lock(&amp_lock);

	/* With interrupts disabled, now fill in the per-cpu info. */
	amp = &__get_cpu_var(amps).per_type[type];
	amp->page = page;
	amp->cpu = smp_processor_id();
	amp->va = va;

	/* For generic kmap_atomic(), choose the PTE writability now. */
	if (!pte_read(pteval))
		pteval = mk_pte(page, page_to_kpgprot(page));

	list_add(&amp->list, &amp_list);
	set_pte(ptep, pteval);
	arch_flush_lazy_mmu_mode();

	spin_unlock(&amp_lock);
	homecache_kpte_unlock(flags);
}

/*
 * Remove a page and va, on this cpu, from the list of kmap_atomic pages.
 * Linear-time search, but we count on the lists being short.
 * We don't need to adjust the PTE under the lock (as opposed to the
 * kmap_atomic_register() case), since we're just unconditionally
 * zeroing the PTE after it's off the list.
 */
static void kmap_atomic_unregister(struct page *page, unsigned long va)
{
	unsigned long flags;
	struct atomic_mapped_page *amp;
	int cpu = smp_processor_id();
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page && amp->cpu == cpu && amp->va == va)
			break;
	}
	BUG_ON(&amp->list == &amp_list);
	list_del(&amp->list);
	spin_unlock_irqrestore(&amp_lock, flags);
}

/* Helper routine for kmap_atomic_fix_kpte(), below. */
static void kmap_atomic_fix_one_kpte(struct atomic_mapped_page *amp,
				     int finished)
{
	pte_t *ptep = kmap_get_pte(amp->va);
	if (!finished) {
		set_pte(ptep, pte_mkmigrate(*ptep));
		flush_remote(0, 0, NULL, amp->va, PAGE_SIZE, PAGE_SIZE,
			     cpumask_of(amp->cpu), NULL, 0);
	} else {
		/*
		 * Rewrite a default kernel PTE for this page.
		 * We rely on the fact that set_pte() writes the
		 * present+migrating bits last.
		 */
		pte_t pte = mk_pte(amp->page, page_to_kpgprot(amp->page));
		set_pte(ptep, pte);
	}
}

/*
 * This routine is a helper function for homecache_fix_kpte(); see
 * its comments for more information on the "finished" argument here.
 *
 * Note that we hold the lock while doing the remote flushes, which
 * will stall any unrelated cpus trying to do kmap_atomic operations.
 * We could just update the PTEs under the lock, and save away copies
 * of the structs (or just the va+cpu), then flush them after we
 * release the lock, but it seems easier just to do it all under the lock.
 */
void kmap_atomic_fix_kpte(struct page *page, int finished)
{
	struct atomic_mapped_page *amp;
	unsigned long flags;
	spin_lock_irqsave(&amp_lock, flags);
	list_for_each_entry(amp, &amp_list, list) {
		if (amp->page == page)
			kmap_atomic_fix_one_kpte(amp, finished);
	}
	spin_unlock_irqrestore(&amp_lock, flags);
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because the kmap code must perform a global TLB invalidation when
 * the kmap pool wraps.
 *
 * Note that they may be slower than on x86 (etc.) because unlike on
 * those platforms, we do have to take a global lock to map and unmap
 * pages on Tile (see above).
 *
 * When holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
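/*
 * A minimal usage sketch (hypothetical; buf, offset and len belong to
 * the caller, not this file): copy a short run of bytes out of a
 * highmem page, with no sleeping calls between the map and the unmap.
 *
 *	char *p = kmap_atomic(page);
 *	memcpy(buf, p + offset, len);
 *	kunmap_atomic(p);
 */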
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;
	pte_t *pte;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	/* Avoid icache flushes by disallowing atomic executable mappings. */
	BUG_ON(pte_exec(prot));

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	pte = kmap_get_pte(vaddr);
	BUG_ON(!pte_none(*pte));

	/* Register that this page is mapped atomically on this cpu. */
	kmap_atomic_register(page, type, vaddr, pte, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

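/*
 * Generic kmap_atomic() entry point: PAGE_NONE is never readable, so
 * the actual writability/protection choice is deferred to
 * kmap_atomic_register(), which checks the page's immutability.
 */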
void *__kmap_atomic(struct page *page)
{
	/* PAGE_NONE is a magic value that tells us to check immutability. */
	return kmap_atomic_prot(page, PAGE_NONE);
}
EXPORT_SYMBOL(__kmap_atomic);

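/*
 * Undo a kmap_atomic()-style mapping: for a fixmap address, unregister
 * the mapping, clear and flush the kernel PTE, and pop the per-cpu
 * kmap_atomic index; lowmem addresses are only sanity-checked.
 */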
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		pte_t *pte = kmap_get_pte(vaddr);
		pte_t pteval = *pte;
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR*smp_processor_id();

		/*
		 * Force other mappings to Oops if they try to access this pte
		 * without first remapping it.  Keeping stale mappings around
		 * is a bad idea.
		 */
		BUG_ON(!pte_present(pteval) && !pte_migrating(pteval));
		kmap_atomic_unregister(pte_page(pteval), vaddr);
		kpte_clear_flush(pte, vaddr);
		kmap_atomic_idx_pop();
	} else {
		/* Must be a lowmem page */
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This API is supposed to allow us to map memory without a "struct page".
 * Currently we don't support this, though this may change in the future.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic(pfn_to_page(pfn));
}
void *kmap_atomic_prot_pfn(unsigned long pfn, pgprot_t prot)
{
	return kmap_atomic_prot(pfn_to_page(pfn), prot);
}

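/*
 * Convert a kernel virtual address (possibly from kmap_atomic()) back
 * to its struct page, walking the kernel page table for fixmap addresses.
 */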
struct page *kmap_atomic_to_page(void *ptr)
{
	pte_t *pte;
	unsigned long vaddr = (unsigned long)ptr;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	pte = kmap_get_pte(vaddr);
	return pte_page(*pte);
}