#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

/* Map a highmem page; may sleep, so never call this in atomic context. */
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}
EXPORT_SYMBOL(kmap);

/* Undo kmap(); must not be called from interrupt context. */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap:
 * it needs no global lock, and it avoids the global TLB invalidation
 * that the kmap code must perform whenever the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so
 * atomic kmaps are appropriate for short, tight code paths only.
 * (An illustrative usage sketch follows __kmap_atomic() below.)
 */
void *kmap_atomic_prot(struct page *page, pgprot_t prot)
{
	unsigned long vaddr;
	int idx, type;

	/* even !CONFIG_PREEMPT needs this, for in_atomic() in do_page_fault() */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	type = kmap_atomic_idx_push();
	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte - idx)));
	set_pte(kmap_pte - idx, mk_pte(page, prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic_prot);

void *__kmap_atomic(struct page *page)
{
	return kmap_atomic_prot(page, kmap_prot);
}
EXPORT_SYMBOL(__kmap_atomic);

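/*
 * Illustrative usage sketch (not part of this file; the function and
 * its arguments are hypothetical): the pattern described in the comment
 * above kmap_atomic_prot().  No sleeping is allowed between
 * kmap_atomic() and kunmap_atomic():
 *
 *	static void copy_from_highpage(void *dst, struct page *page, size_t len)
 *	{
 *		char *vaddr = kmap_atomic(page);  // pagefaults now disabled
 *		memcpy(dst, vaddr, len);          // short, tight code path
 *		kunmap_atomic(vaddr);             // slot popped, pte flushed
 *	}
 */
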
/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn)
{
	return kmap_atomic_prot_pfn(pfn, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn);

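/*
 * Illustrative sketch (hypothetical caller, not from this file): a
 * crash-dump style reader might use kmap_atomic_pfn() to copy from a
 * pfn that has no struct page in the running kernel:
 *
 *	void *vaddr = kmap_atomic_pfn(pfn);
 *	memcpy(buf, vaddr + offset, count);
 *	kunmap_atomic(vaddr);
 */
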
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;

	if (vaddr >= __fix_to_virt(FIX_KMAP_END) &&
	    vaddr <= __fix_to_virt(FIX_KMAP_BEGIN)) {
		int idx, type;

		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

#ifdef CONFIG_DEBUG_HIGHMEM
		WARN_ON_ONCE(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
#endif
		/*
		 * Force other mappings to Oops if they try to access this
		 * pte without first remapping it.  Keeping stale mappings
		 * around is also a bad idea, in case the page changes
		 * cacheability attributes or becomes a protected page in
		 * a hypervisor.
		 */
		kpte_clear_flush(kmap_pte - idx, vaddr);
		kmap_atomic_idx_pop();
	}
#ifdef CONFIG_DEBUG_HIGHMEM
	else {
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
	}
#endif

	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);

/* Return the struct page backing an atomic-kmap (or lowmem) address. */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
EXPORT_SYMBOL(kmap_atomic_to_page);

/*
 * Walk all highmem zones, hand their pages to the buddy allocator and
 * account them in totalram_pages.
 */
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}
139}