#include <linux/highmem.h>
#include <linux/module.h>

/*
 * kmap() may block waiting for a free slot in the persistent kmap
 * pool, so it may only be called from contexts that are allowed to
 * sleep.  Lowmem pages are permanently mapped and need no slot.
 */
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

/*
 * The persistent kmap pool is protected by a lock that is not
 * interrupt safe, so kunmap() must never be called from interrupt
 * context; kunmap_atomic() is the tool for that.
 */
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

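/*
 * Usage sketch (illustrative only, helper name hypothetical): copying
 * out of a possibly-highmem page from a context that may sleep.
 *
 *	static void copy_from_page(void *dst, struct page *src)
 *	{
 *		void *vsrc = kmap(src);
 *
 *		memcpy(dst, vsrc, PAGE_SIZE);
 *		kunmap(src);
 *	}
 */
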
/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap
 * because no global lock is needed and because the kmap code must
 * perform a global TLB invalidation when the kmap pool wraps.
 *
 * However, while an atomic kmap is held it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	/*
	 * Lowmem pages are always addressable; pagefaults stay disabled
	 * until the matching kunmap_atomic() re-enables them.
	 */
	if (!PageHighMem(page))
		return page_address(page);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

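/*
 * Sketch (illustrative only, variable names hypothetical): the _prot
 * variant lets the caller pick the page protection for the temporary
 * mapping; a paravirtualized guest, for instance, might map a
 * pagetable page read-only.
 *
 *	pte_t *ptep = kmap_atomic_prot(pt_page, KM_PTE0, PAGE_KERNEL_RO);
 *	... read pagetable entries through ptep ...
 *	kunmap_atomic(ptep, KM_PTE0);
 */
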
void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
		/*
		 * This was a lowmem address handed out by the
		 * !PageHighMem() fast path: nothing to unmap.
		 */
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

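/*
 * Usage sketch (illustrative only, helper name hypothetical): zeroing a
 * possibly-highmem page from a context that must not sleep.  The fixmap
 * slot is per-CPU, which is why pagefaults and preemption stay disabled
 * between the two calls.
 *
 *	static void zero_page_atomic(struct page *page)
 *	{
 *		void *kaddr = kmap_atomic(page, KM_USER0);
 *
 *		memset(kaddr, 0, PAGE_SIZE);
 *		kunmap_atomic(kaddr, KM_USER0);
 *	}
 */
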
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

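/*
 * Sketch (illustrative only): this variant serves callers, such as the
 * kexec/crash-dump path, that must touch physical frames which may have
 * no struct page.  The pfn and buffer below are example values.
 *
 *	void *kaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *	memcpy(buf, kaddr, PAGE_SIZE);
 *	kunmap_atomic(kaddr, KM_USER0);
 */
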
/*
 * Convert an address obtained from kmap_atomic() back to its struct
 * page.  Lowmem addresses translate directly; fixmap addresses are
 * looked up through the kmap pte array.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

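/*
 * Sketch (illustrative only): while a mapping is held, the translation
 * is invertible, i.e. the following assertion should hold.
 *
 *	void *kaddr = kmap_atomic(page, KM_USER0);
 *	BUG_ON(kmap_atomic_to_page(kaddr) != page);
 *	kunmap_atomic(kaddr, KM_USER0);
 */
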
EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);
EXPORT_SYMBOL(kmap_atomic_to_page);