#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */
#include <linux/bootmem.h>

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
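
/*
 * Illustrative usage sketch (not part of the upstream file): kmap() may
 * sleep, so it suits longer-lived mappings set up from process context.
 * The helper name is hypothetical; memset() is assumed to be available
 * through the usual kernel headers.
 */
static inline void example_zero_page_sleepable(struct page *page)
{
	void *vaddr = kmap(page);	/* may sleep waiting for a kmap slot */

	memset(vaddr, 0, PAGE_SIZE);
	kunmap(page);			/* kunmap() takes the page, not the vaddr */
}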

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(kmap_atomic);
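
/*
 * Illustrative usage sketch (not part of the upstream file): copy a buffer
 * into a possibly-highmem page through a short-lived atomic mapping. No
 * sleeping is allowed between kmap_atomic() and kunmap_atomic(); the helper
 * name and its arguments are hypothetical.
 */
static inline void example_copy_to_page_atomic(struct page *page,
					       const void *buf, size_t len)
{
	void *vaddr = kmap_atomic(page);

	memcpy(vaddr, buf, len);	/* must not sleep while mapped */
	kunmap_atomic(vaddr);		/* kunmap_atomic() takes the vaddr */
}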

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);
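
/*
 * Illustrative usage sketch (not part of the upstream file): read one page
 * of physical memory that has no struct page (e.g. a reserved region),
 * identified by its pfn. The helper name and its callers are hypothetical.
 */
static inline void example_read_phys_page(unsigned long pfn, void *dest)
{
	void *vaddr = kmap_atomic_pfn(pfn);

	memcpy(dest, vaddr, PAGE_SIZE);
	kunmap_atomic(vaddr);
}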

void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in a
		 * hypervisor.
		 */
		kpte_clear_flush(kmap_pte-idx, vaddr);
		kmap_atomic_idx_pop();
		arch_flush_lazy_mmu_mode();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);

void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	/*
	 * Explicitly reset zone->managed_pages because set_highmem_pages_init()
	 * is invoked before free_all_bootmem()
	 */
	reset_all_zones_managed_pages();
	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
}