#include <linux/highmem.h>
#include <linux/module.h>

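/*
 * Map a highmem page into the kernel's permanent kmap address space.
 * May sleep while waiting for a free slot, so it must not be called
 * from atomic context; lowmem pages are returned directly via
 * page_address().
 */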
void *kmap(struct page *page)
{
	might_sleep();
	if (!PageHighMem(page))
		return page_address(page);
	return kmap_high(page);
}

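/*
 * Release a mapping obtained with kmap(). Must not be called from
 * interrupt context; unmapping a lowmem page is a no-op.
 */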
void kunmap(struct page *page)
{
	if (in_interrupt())
		BUG();
	if (!PageHighMem(page))
		return;
	kunmap_high(page);
}

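/*
 * Typical kmap()/kunmap() pairing, for illustration only (a sketch, not
 * code from this file); assumes a process context that is allowed to sleep:
 *
 *	void copy_to_page(struct page *page, const void *src, size_t len)
 *	{
 *		char *vaddr = kmap(page);
 *
 *		memcpy(vaddr, src, len);
 *		kunmap(page);
 *	}
 */

/*
 * With CONFIG_DEBUG_HIGHMEM, warn (at most ten times in total) when the
 * km_type slot passed to kmap_atomic_prot() does not match the calling
 * context: a non-IRQ slot used from hardirq or softirq context, or an
 * IRQ/softirq slot used with interrupts enabled.
 */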
static void debug_kmap_atomic_prot(enum km_type type)
{
#ifdef CONFIG_DEBUG_HIGHMEM
	static unsigned warn_count = 10;

	if (unlikely(warn_count == 0))
		return;

	if (unlikely(in_interrupt())) {
		if (in_irq()) {
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_BIO_SRC_IRQ && type != KM_BIO_DST_IRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		} else if (!irqs_disabled()) {	/* softirq */
			if (type != KM_IRQ0 && type != KM_IRQ1 &&
			    type != KM_SOFTIRQ0 && type != KM_SOFTIRQ1 &&
			    type != KM_SKB_SUNRPC_DATA &&
			    type != KM_SKB_DATA_SOFTIRQ &&
			    type != KM_BOUNCE_READ) {
				WARN_ON(1);
				warn_count--;
			}
		}
	}

	if (type == KM_IRQ0 || type == KM_IRQ1 || type == KM_BOUNCE_READ ||
	    type == KM_BIO_SRC_IRQ || type == KM_BIO_DST_IRQ) {
		if (!irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	} else if (type == KM_SOFTIRQ0 || type == KM_SOFTIRQ1) {
		if (irq_count() == 0 && !irqs_disabled()) {
			WARN_ON(1);
			warn_count--;
		}
	}
#endif
}

/*
 * kmap_atomic/kunmap_atomic is significantly faster than kmap/kunmap because
 * no global lock is needed and because the kmap code must perform a global TLB
 * invalidation when the kmap pool wraps.
 *
 * However, when holding an atomic kmap it is not legal to sleep, so atomic
 * kmaps are appropriate for short, tight code paths only.
 */
void *kmap_atomic_prot(struct page *page, enum km_type type, pgprot_t prot)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
	pagefault_disable();

	if (!PageHighMem(page))
		return page_address(page);

	debug_kmap_atomic_prot(type);

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	BUG_ON(!pte_none(*(kmap_pte-idx)));
	set_pte(kmap_pte-idx, mk_pte(page, prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}

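/*
 * Illustrative only (a sketch, not code from this file): kmap_atomic_prot()
 * lets the caller pick the protection bits for the temporary slot, e.g. a
 * read-only mapping (assuming PAGE_KERNEL_RO is available on the
 * configuration in question):
 *
 *	void *vaddr = kmap_atomic_prot(page, KM_USER0, PAGE_KERNEL_RO);
 *
 *	inspect_data(vaddr);
 *	kunmap_atomic(vaddr, KM_USER0);
 */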
void *kmap_atomic(struct page *page, enum km_type type)
{
	return kmap_atomic_prot(page, type, kmap_prot);
}

void kunmap_atomic(void *kvaddr, enum km_type type)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

	/*
	 * Force other mappings to Oops if they try to access this pte
	 * without first remapping it.  Keeping stale mappings around is
	 * also a bad idea, in case the page changes cacheability
	 * attributes or becomes a protected page in a hypervisor.
	 */
	if (vaddr == __fix_to_virt(FIX_KMAP_BEGIN+idx))
		kpte_clear_flush(kmap_pte-idx, vaddr);
	else {
#ifdef CONFIG_DEBUG_HIGHMEM
		BUG_ON(vaddr < PAGE_OFFSET);
		BUG_ON(vaddr >= (unsigned long)high_memory);
#endif
	}

	arch_flush_lazy_mmu_mode();
	pagefault_enable();
}

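/*
 * Typical atomic pairing, for illustration only (a sketch, not code from
 * this file); no sleeping is allowed between map and unmap:
 *
 *	void *vaddr = kmap_atomic(page, KM_USER0);
 *
 *	clear_page(vaddr);
 *	kunmap_atomic(vaddr, KM_USER0);
 */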
/* This is the same as kmap_atomic() but can map memory that doesn't
 * have a struct page associated with it.
 */
void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
{
	enum fixed_addresses idx;
	unsigned long vaddr;

	pagefault_disable();

	idx = type + KM_TYPE_NR*smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
	set_pte(kmap_pte-idx, pfn_pte(pfn, kmap_prot));
	arch_flush_lazy_mmu_mode();

	return (void *)vaddr;
}
EXPORT_SYMBOL_GPL(kmap_atomic_pfn); /* temporarily in use by i915 GEM until vmap */

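/*
 * Illustrative only (a sketch, not code from this file): mapping a frame
 * that has no struct page behind it, with a caller-supplied pfn:
 *
 *	void *vaddr = kmap_atomic_pfn(pfn, KM_USER0);
 *
 *	memcpy(buf, vaddr, PAGE_SIZE);
 *	kunmap_atomic(vaddr, KM_USER0);
 */

/*
 * Translate an address returned by kmap_atomic() back to its struct page;
 * plain lowmem addresses fall through to virt_to_page().
 */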
struct page *kmap_atomic_to_page(void *ptr)
{
	unsigned long idx, vaddr = (unsigned long)ptr;
	pte_t *pte;

	if (vaddr < FIXADDR_START)
		return virt_to_page(ptr);

	idx = virt_to_fix(vaddr);
	pte = kmap_pte - (idx - FIX_KMAP_BEGIN);
	return pte_page(*pte);
}

EXPORT_SYMBOL(kmap);
EXPORT_SYMBOL(kunmap);
EXPORT_SYMBOL(kmap_atomic);
EXPORT_SYMBOL(kunmap_atomic);