#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/swap.h> /* for totalram_pages */

void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}
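
/*
 * Typical kmap()/kunmap() usage (an illustrative sketch, not part of
 * the original file).  kmap() may sleep while waiting for a free slot
 * in the kmap pool, so this pattern is only valid from process context:
 *
 *	void *vaddr = kmap(page);
 *	memcpy(vaddr, buf, len);
 *	kunmap(page);
 */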

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, while holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic(type);

	/* each CPU owns KM_TYPE_NR consecutive fixmap slots */
	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * a bad idea too, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}
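
/*
 * Typical atomic usage (an illustrative sketch, not part of the
 * original file).  The same km_type must be passed to the map and the
 * unmap call, and no sleeping is allowed in between:
 *
 *	char *vaddr = kmap_atomic(page, KM_USER0);
 *	memcpy(dst, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr, KM_USER0);
 */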

/*
 * This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	return kmap_atomic_prot_pfn(pfn, type, kmap_prot);
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */

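/*
 * kmap_atomic_pfn() is useful when only a page frame number is at hand,
 * e.g. for memory with no struct page behind it (an illustrative
 * sketch, not part of the original file):
 *
 *	void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *	... access the frame through vaddr ...
 *	kunmap_atomic(vaddr, KM_USER0);
 */
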
/*
 * Translate an address returned by kmap_atomic() back to the page it
 * maps.  Lowmem addresses resolve directly via virt_to_page(); fixmap
 * addresses are resolved through the kmap pte array.
 */
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}
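
/*
 * Round-trip sketch (illustrative only, not part of the original
 * file): an address obtained from kmap_atomic() resolves back to the
 * page it maps:
 *
 *	void *vaddr = kmap_atomic(page, KM_USER0);
 *	BUG_ON(kmap_atomic_to_page(vaddr) != page);
 *	kunmap_atomic(vaddr, KM_USER0);
 */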

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);

/*
 * Hand every highmem page over to the buddy allocator and account for
 * it in totalram_pages.  Called once during early boot.
 */
void __init set_highmem_pages_init(void)
{
	struct zone *zone;
	int nid;

	for_each_zone(zone) {
		unsigned long zone_start_pfn, zone_end_pfn;

		if (!is_highmem(zone))
			continue;

		zone_start_pfn = zone->zone_start_pfn;
		zone_end_pfn = zone_start_pfn + zone->spanned_pages;

		nid = zone_to_nid(zone);
		printk(KERN_INFO "Initializing %s for node %d (%08lx:%08lx)\n",
			zone->name, nid, zone_start_pfn, zone_end_pfn);

		add_highpages_with_active_regions(nid, zone_start_pfn,
						  zone_end_pfn);
	}
	totalram_pages += totalhigh_pages;
}