blob: d6f9ee8f10b795f7aadf7a48a7358d26cb30d3c7 [file] [log] [blame]
/*
 * arch/arm/mm/highmem.c -- ARM highmem support
 *
 * Author:	Nicolas Pitre
 * Created:	september 8, 2008
 * Copyright:	Marvell Semiconductors Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
12
Laura Abbott0cd39aa2013-04-05 14:12:53 -070013#include <linux/cpu.h>
Nicolas Pitred73cd422008-09-15 16:44:55 -040014#include <linux/module.h>
15#include <linux/highmem.h>
16#include <linux/interrupt.h>
17#include <asm/fixmap.h>
18#include <asm/cacheflush.h>
19#include <asm/tlbflush.h>
20#include "mm.h"
21
/*
 * Map a page into the kernel's address space.
 *
 * Lowmem pages already have a permanent kernel mapping, which is
 * returned directly; highmem pages are routed through kmap_high().
 * May sleep, so callers must be in process context.
 */
void *kmap(struct page *page)
{
	might_sleep();

	if (PageHighMem(page))
		return kmap_high(page);

	return page_address(page);
}
EXPORT_SYMBOL(kmap);
30
/*
 * Release a mapping obtained with kmap().
 *
 * Only highmem pages have a kmap entry to tear down; lowmem pages
 * are permanently mapped and need no work.  Must not be called from
 * interrupt context.
 */
void kunmap(struct page *page)
{
	BUG_ON(in_interrupt());

	if (PageHighMem(page))
		kunmap_high(page);
}
EXPORT_SYMBOL(kunmap);
39
/*
 * Atomically map a highmem page into a per-CPU fixmap slot.
 *
 * Page faults are disabled until the matching __kunmap_atomic().
 * Lowmem pages are returned via their permanent mapping.  If the page
 * already has a permanent kmap mapping, kmap_high_get() returns it
 * (with its pkmap count raised) and that address is reused — on VIVT
 * caches this avoids creating a second, aliasing mapping.  Otherwise
 * a fixmap slot is claimed from this CPU's kmap_atomic stack.
 */
void *kmap_atomic(struct page *page)
{
	unsigned int idx;
	unsigned long vaddr;
	void *kmap;
	int type;

	pagefault_disable();
	if (!PageHighMem(page))
		return page_address(page);

#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * There is no cache coherency issue when non VIVT, so force the
	 * dedicated kmap usage for better debugging purposes in that case.
	 * (The #ifdef'd "if/else" deliberately pairs with the assignment
	 * below: with debugging off, kmap_high_get() is always tried.)
	 */
	if (!cache_is_vivt())
		kmap = NULL;
	else
#endif
		kmap = kmap_high_get(page);
	if (kmap)
		return kmap;

	type = kmap_atomic_idx_push();

	idx = type + KM_TYPE_NR * smp_processor_id();
	vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
#ifdef CONFIG_DEBUG_HIGHMEM
	/*
	 * With debugging enabled, kunmap_atomic forces that entry to 0.
	 * Make sure it was indeed properly unmapped.
	 */
	BUG_ON(!pte_none(get_top_pte(vaddr)));
#endif
	/*
	 * When debugging is off, kunmap_atomic leaves the previous mapping
	 * in place, so the contained TLB flush ensures the TLB is updated
	 * with the new mapping.
	 */
	set_top_pte(vaddr, mk_pte(page, kmap_prot));

	return (void *)vaddr;
}
EXPORT_SYMBOL(kmap_atomic);
Nicolas Pitred73cd422008-09-15 16:44:55 -040085
/*
 * Undo a kmap_atomic()/kmap_atomic_pfn() mapping and re-enable page
 * faults.
 *
 * Three cases by address range:
 *  - fixmap addresses: pop this CPU's kmap_atomic stack; on VIVT
 *    caches the slot's data must be flushed before the virtual
 *    address can be reused;
 *  - pkmap addresses: the pointer came from the kmap_high_get() path
 *    in kmap_atomic(), so drop that permanent-kmap reference;
 *  - anything else (lowmem): no teardown needed.
 */
void __kunmap_atomic(void *kvaddr)
{
	unsigned long vaddr = (unsigned long) kvaddr & PAGE_MASK;
	int idx, type;

	if (kvaddr >= (void *)FIXADDR_START) {
		type = kmap_atomic_idx();
		idx = type + KM_TYPE_NR * smp_processor_id();

		if (cache_is_vivt())
			__cpuc_flush_dcache_area((void *)vaddr, PAGE_SIZE);
#ifdef CONFIG_DEBUG_HIGHMEM
		/* Verify we are unmapping the slot we think we are. */
		BUG_ON(vaddr != __fix_to_virt(FIX_KMAP_BEGIN + idx));
		set_top_pte(vaddr, __pte(0));
#else
		(void) idx;  /* to kill a warning */
#endif
		kmap_atomic_idx_pop();
	} else if (vaddr >= PKMAP_ADDR(0) && vaddr < PKMAP_ADDR(LAST_PKMAP)) {
		/* this address was obtained through kmap_high_get() */
		kunmap_high(pte_page(pkmap_page_table[PKMAP_NR(vaddr)]));
	}
	pagefault_enable();
}
EXPORT_SYMBOL(__kunmap_atomic);
Nicolas Pitred73cd422008-09-15 16:44:55 -0400111
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700112void *kmap_atomic_pfn(unsigned long pfn)
Nicolas Pitred73cd422008-09-15 16:44:55 -0400113{
Nicolas Pitred73cd422008-09-15 16:44:55 -0400114 unsigned long vaddr;
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700115 int idx, type;
Nicolas Pitred73cd422008-09-15 16:44:55 -0400116
117 pagefault_disable();
118
Peter Zijlstra3e4d3af2010-10-26 14:21:51 -0700119 type = kmap_atomic_idx_push();
Nicolas Pitred73cd422008-09-15 16:44:55 -0400120 idx = type + KM_TYPE_NR * smp_processor_id();
121 vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
122#ifdef CONFIG_DEBUG_HIGHMEM
Russell King0d31fe42011-07-04 11:22:27 +0100123 BUG_ON(!pte_none(get_top_pte(vaddr)));
Nicolas Pitred73cd422008-09-15 16:44:55 -0400124#endif
Russell King67ece142011-07-02 15:20:44 +0100125 set_top_pte(vaddr, pfn_pte(pfn, kmap_prot));
Nicolas Pitred73cd422008-09-15 16:44:55 -0400126
127 return (void *)vaddr;
128}
129
130struct page *kmap_atomic_to_page(const void *ptr)
131{
132 unsigned long vaddr = (unsigned long)ptr;
Nicolas Pitred73cd422008-09-15 16:44:55 -0400133
134 if (vaddr < FIXADDR_START)
135 return virt_to_page(ptr);
136
Russell King0d31fe42011-07-04 11:22:27 +0100137 return pte_page(get_top_pte(vaddr));
Nicolas Pitred73cd422008-09-15 16:44:55 -0400138}
Laura Abbott0cd39aa2013-04-05 14:12:53 -0700139
140#ifdef CONFIG_ARCH_WANT_KMAP_ATOMIC_FLUSH
141static void kmap_remove_unused_cpu(int cpu)
142{
143 int start_idx, idx, type;
144
145 pagefault_disable();
146 type = kmap_atomic_idx();
147 start_idx = type + 1 + KM_TYPE_NR * cpu;
148
149 for (idx = start_idx; idx < KM_TYPE_NR + KM_TYPE_NR * cpu; idx++) {
150 unsigned long vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
151 pte_t ptep;
152
153 ptep = get_top_pte(vaddr);
154 if (ptep)
155 set_top_pte(vaddr, __pte(0));
156 }
157 pagefault_enable();
158}
159
/* on_each_cpu() callback: flush stale kmap_atomic slots locally. */
static void kmap_remove_unused(void *unused)
{
	int cpu = smp_processor_id();

	kmap_remove_unused_cpu(cpu);
}
164
165void kmap_atomic_flush_unused(void)
166{
167 on_each_cpu(kmap_remove_unused, NULL, 1);
168}
169
170static int hotplug_kmap_atomic_callback(struct notifier_block *nfb,
171 unsigned long action, void *hcpu)
172{
173 switch (action & (~CPU_TASKS_FROZEN)) {
174 case CPU_DYING:
175 kmap_remove_unused_cpu((int)hcpu);
176 break;
177 default:
178 break;
179 }
180
181 return NOTIFY_OK;
182}
183
/* Hotplug notifier registered by init_kmap_atomic() below. */
static struct notifier_block hotplug_kmap_atomic_notifier = {
	.notifier_call = hotplug_kmap_atomic_callback,
};
187
/*
 * Register the CPU hotplug notifier so dying CPUs get their stale
 * kmap_atomic slots cleared (see hotplug_kmap_atomic_callback).
 */
static int __init init_kmap_atomic(void)
{
	return register_hotcpu_notifier(&hotplug_kmap_atomic_notifier);
}
early_initcall(init_kmap_atomic);
193#endif