#include <linux/compiler.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

void *kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
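
/*
 * Usage sketch (illustrative only; "buffer" is a hypothetical
 * destination): kmap() may sleep, so this pattern is only valid in
 * process context.
 *
 *	void *addr = kmap(page);
 *	memcpy(buffer, addr, PAGE_SIZE);
 *	kunmap(page);
 */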

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
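
/*
 * Usage sketch (illustrative only): the mapping must be dropped before
 * the code path can sleep, and nested mappings pop in LIFO order.
 *
 *	void *vaddr = kmap_atomic(page);
 *	clear_page(vaddr);
 *	kunmap_atomic(vaddr);
 */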

void *kmap_atomic(struct page *page)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte - idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int type __maybe_unused;

	if (vaddr < FIXADDR_START) { /* FIXME */
		pagefault_enable();
		return;
	}

	type = kmap_atomic_idx();
#ifdef CONFIG_DEBUG_HIGHMEM
	{
		int idx = type + KM_TYPE_NR * smp_processor_id();

		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

		/*
		 * Force other mappings to Oops if they try to access
		 * this pte without first remapping it.
		 */
		pte_clear(&init_mm, vaddr, kmap_pte - idx);
		local_flush_tlb_one(vaddr);
	}
#endif
	kmap_atomic_idx_pop();
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	unsigned long vaddr;
	int idx, type;

	pagefault_disable();

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte - idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void *)vaddr;
}
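
/*
 * Usage sketch (illustrative only; "mmio_pfn" is a hypothetical page
 * frame number, e.g. of a device region with no struct page). The
 * mapping is torn down with the regular kunmap_atomic() on the
 * returned address.
 *
 *	void *vaddr = kmap_atomic_pfn(mmio_pfn);
 *	...access the frame through vaddr...
 *	kunmap_atomic(vaddr);
 */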

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
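
/*
 * Usage sketch (illustrative only): recover the struct page behind an
 * atomic kmap address; for a lowmem address this degenerates to
 * virt_to_page().
 *
 *	void *vaddr = kmap_atomic(page);
 *	struct page *p = kmap_atomic_to_page(vaddr);
 *	kunmap_atomic(vaddr);
 *
 * after which p == page.
 */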

void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}