#include <linux/config.h>
#include <linux/module.h>
#include <linux/highmem.h>
#include <asm/tlbflush.h>
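
/*
 * Map a page into the kernel's address space.  Lowmem pages are
 * permanently mapped, so only highmem pages need a pkmap slot from
 * kmap_high(); the new mapping's TLB entry is then flushed.
 */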
void *__kmap(struct page *page)
{
	void *addr;

	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	addr = kmap_high(page);
	flush_tlb_one((unsigned long)addr);

	return addr;
}

void __kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
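
/*
 * Usage sketch (illustrative only, not part of this file): a kmap()
 * mapping may be held across operations that sleep, e.g. when zeroing
 * a highmem page from process context:
 *
 *	void *vaddr = kmap(page);
 *	memset(vaddr, 0, PAGE_SIZE);
 *	kunmap(page);
 *
 * Helpers in <linux/highmem.h> such as clear_highpage() wrap this
 * pattern (using the atomic variants below where possible).
 */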

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must
 * perform a global TLB invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
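
/*
 * Usage sketch (illustrative only, not part of this file): atomic
 * kmaps are taken and released in pairs, each with its own km_type
 * slot so that nested mappings on one CPU do not collide:
 *
 *	void *src = kmap_atomic(src_page, KM_USER0);
 *	void *dst = kmap_atomic(dst_page, KM_USER1);
 *	memcpy(dst, src, PAGE_SIZE);
 *	kunmap_atomic(dst, KM_USER1);
 *	kunmap_atomic(src, KM_USER0);
 */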

void *__kmap_atomic(struct page *page, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	inc_preempt_count();
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	if (!pte_none(*(kmap_pte-idx)))
		BUG();
#endif
	set_pte(kmap_pte-idx, mk_pte(page, kmap_prot));
	local_flush_tlb_one((unsigned long)vaddr);

	return (void*) vaddr;
}
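
/*
 * With CONFIG_DEBUG_HIGHMEM the slot is actually unmapped here so that
 * use-after-kunmap is caught; otherwise the pte is left in place and
 * only the preempt count is dropped.
 */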
void __kunmap_atomic(void *kvaddr, enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	if (vaddr < FIXADDR_START) { // FIXME
		dec_preempt_count();
		preempt_check_resched();
		return;
	}

	if (vaddr != __fix_to_virt(FIX_KMAP_BEGIN+idx))
		BUG();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.
	 */
	pte_clear(&init_mm, vaddr, kmap_pte-idx);
	local_flush_tlb_one(vaddr);
#endif

	dec_preempt_count();
	preempt_check_resched();
}

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	inc_preempt_count();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	flush_tlb_one(vaddr);

	return (void*) vaddr;
}
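
/*
 * Usage sketch (illustrative only, not part of this file): handy when
 * only a page frame number is known, e.g. for a frame that has no
 * struct page backing:
 *
 *	void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *	... access the frame through vaddr ...
 *	kunmap_atomic(vaddr, KM_USER0);
 */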
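
/*
 * Translate an address returned by kmap_atomic() back to the struct
 * page it maps.  Lowmem addresses fall through to virt_to_page();
 * fixmap addresses are resolved via the corresponding kmap pte.
 */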
struct page *__kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(__kmap);
EXPORT_SYMBOL(__kunmap);
EXPORT_SYMBOL(__kmap_atomic);
EXPORT_SYMBOL(__kunmap_atomic);
EXPORT_SYMBOL(__kmap_atomic_to_page);