#include <linux/module.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <asm/fixmap.h>
#include <asm/tlbflush.h>

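/*
 * pte of the first kmap fixmap slot, cached by kmap_init().  Slot idx is
 * reached as kmap_pte - idx because fixmap addresses grow downwards.
 */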
static pte_t *kmap_pte;

unsigned long highstart_pfn, highend_pfn;

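/*
 * Map a highmem page into the kernel's permanent kmap area.  May sleep, so
 * it must not be called from atomic context; lowmem pages are simply
 * translated with page_address().
 */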
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}
EXPORT_SYMBOL(__kmap);

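/*
 * Undo a __kmap().  Must not be called from interrupt context; lowmem
 * pages need no unmapping.
 */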
void __kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(__kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */

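/*
 * Illustrative use only (not taken from this file): callers normally go
 * through the kmap_atomic()/kunmap_atomic() wrappers with a km_type slot
 * such as KM_USER0, e.g.
 *
 *	void *vaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(buffer, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr, KM_USER0);
 *
 * No sleeping is allowed between the two calls.
 */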
void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	BUG_ON(!pte_none(*(kmap_pte - idx)));
#endif
	set_pte(kmap_pte-idx, mk_pte(page, PAGE_KERNEL));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
EXPORT_SYMBOL(__kmap_atomic);

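/*
 * Tear down an atomic kmap: with CONFIG_DEBUG_HIGHMEM the fixmap pte is
 * cleared and its TLB entry flushed, otherwise the slot is simply left to
 * be reused; pagefaults are re-enabled in either case.
 */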
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		pagefault_enable();
		return;
	}

	BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));

	/*
	 * force other mappings to Oops if they try to access
	 * this pte without first remapping it
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	debug_kmap_atomic(type);
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, PAGE_KERNEL));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}

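/*
 * Translate an address returned by kmap_atomic() back to its struct page:
 * lowmem addresses go through virt_to_page(), fixmap addresses are looked
 * up via the cached kmap pte.
 */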
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

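/* Cache the pte for the first kmap fixmap slot; runs once at init time. */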
void __init kmap_init(void)
{
	unsigned long kmap_vstart;

	/* cache the first kmap pte */
	kmap_vstart = __fix_to_virt(FIX_KMAP_BEGIN);
	kmap_pte = kmap_get_fixmap_pte(kmap_vstart);
}