/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>
#include <asm/pgalloc.h>
#include <asm/sections.h>

static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);

pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;

        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, address);
}

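/*
 * Illustrative caller sketch (not part of the original file): a large
 * mapping makes lookup_address() return a pointer into the pmd rather
 * than a real pte, so a hypothetical caller has to tell the two cases
 * apart itself; handle_large()/handle_4k() are made-up names:
 *
 *      pte_t *pte = lookup_address(addr);
 *      if (pte && pmd_large(*(pmd_t *)pte))
 *              handle_large(pte);      (pte points at the pmd entry)
 *      else if (pte)
 *              handle_4k(pte);
 */
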
static struct page *split_large_page(unsigned long address, pgprot_t prot,
                                     pgprot_t ref_prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        /*
         * page_private is used to track the number of entries in
         * the page table page that have non-standard attributes.
         */
        SetPagePrivate(base);
        page_private(base) = 0;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        paravirt_alloc_pt(page_to_pfn(base));
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                set_pte(&pbase[i], pfn_pte(addr >> PAGE_SHIFT,
                                           addr == address ? prot : ref_prot));
        }
        return base;
}

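/*
 * Worked example for the loop above (illustrative; assumes non-PAE,
 * i.e. 4MB large pages with PTRS_PER_PTE == 1024, and a 0xc0000000
 * PAGE_OFFSET): for address == 0xc0123000, __pa(address) is
 * 0x00123000, addr starts at 0x00000000 (the 4MB boundary), and the
 * loop installs 1024 4KB ptes covering 0x00000000-0x003fffff. Only
 * entry 0x123, where addr == address, receives `prot`; all the
 * others keep `ref_prot`.
 */
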
static void flush_kernel_map(void *arg)
{
        unsigned long adr = (unsigned long)arg;

        if (adr && cpu_has_clflush) {
                int i;

                for (i = 0; i < PAGE_SIZE; i += boot_cpu_data.x86_clflush_size)
                        asm volatile("clflush (%0)" :: "r" (adr + i));
        } else if (boot_cpu_data.x86_model >= 4)
                wbinvd();

        /* Flush all to work around errata in early Athlons regarding
         * large page flushing.
         */
        __flush_tlb_all();
}

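/*
 * Worked flush arithmetic (illustrative; assumes the common 64-byte
 * clflush granularity): the clflush loop above issues
 * PAGE_SIZE / 64 == 64 clflush instructions, one per cache line of
 * the 4KB page being flushed.
 */

/*
 * Update a kernel pte slot (or pmd slot, for large pages) in init_mm
 * and, when the kernel pmds are not shared across page tables (PAE
 * without SHARED_KERNEL_PMD), replicate the change into every pgd on
 * pgd_list.
 */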
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long flags;

        set_pte_atomic(kpte, pte);      /* change init_mm */
        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;

                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pgprot_t ref_prot;
        pte_t *linear;

        ref_prot =
        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                ? PAGE_KERNEL_LARGE_EXEC : PAGE_KERNEL_LARGE;

        linear = (pte_t *)
                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            ref_prot));
}
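
/*
 * Change the attributes of a single kernel linear-map page. Called
 * with cpa_lock held; split_large_page() may drop and retake the
 * lock around its allocation.
 */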
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if (!pte_huge(*kpte)) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        pgprot_t ref_prot;
                        struct page *split;

                        ref_prot =
                        ((address & LARGE_PAGE_MASK) < (unsigned long)&_etext)
                                ? PAGE_KERNEL_EXEC : PAGE_KERNEL;
                        split = split_large_page(address, prot, ref_prot);
                        if (!split)
                                return -ENOMEM;
                        set_pmd_pte(kpte, address, mk_pte(split, ref_prot));
                        kpte_page = split;
                }
                page_private(kpte_page)++;
        } else if (!pte_huge(*kpte)) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                BUG_ON(page_private(kpte_page) == 0);
                page_private(kpte_page)--;
        } else
                BUG();

        /*
         * If the pte was reserved, it means it was created at boot
         * time (not via split_large_page) and in turn we must not
         * replace it with a large page.
         */
        if (!PageReserved(kpte_page)) {
                if (cpu_has_pse && (page_private(kpte_page) == 0)) {
                        ClearPagePrivate(kpte_page);
                        paravirt_release_pt(page_to_pfn(kpte_page));
                        list_add(&kpte_page->lru, &df_list);
                        revert_page(kpte_page, address);
                }
        }
        return 0;
}
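
/*
 * Bookkeeping note: page_private() of the pte page counts how many of
 * its entries carry non-default protections. When the count returns
 * to zero on a CPU with large-page support (PSE), and the pte page
 * was not created at boot time (PageReserved), the 2/4MB mapping is
 * restored and the now-unused pte page is queued on df_list to be
 * freed after the next global_flush_tlb().
 */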

static inline void flush_map(void *adr)
{
        on_each_cpu(flush_kernel_map, adr, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist. This changes the page attributes of the
 * in-kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings elsewhere.
 * This function only deals with the kernel linear map.
 *
 * Caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
}
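
/*
 * Typical call sequence (hypothetical driver snippet, shown only to
 * illustrate the contract documented above; PAGE_KERNEL_NOCACHE is
 * one example of a non-default protection, and `nr` is a made-up
 * page count):
 *
 *      change_page_attr(page, nr, PAGE_KERNEL_NOCACHE);
 *      global_flush_tlb();
 *      ... use the now-uncached pages ...
 *      change_page_attr(page, nr, PAGE_KERNEL);
 *      global_flush_tlb();
 */

/*
 * Process the deferred work queued on df_list: flush TLBs (and
 * caches, per-page via clflush when available, otherwise globally)
 * on all CPUs, then free the reverted page-table pages.
 */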
void global_flush_tlb(void)
{
        struct list_head l;
        struct page *pg, *next;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_replace_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        if (!cpu_has_clflush)
                flush_map(NULL);
        list_for_each_entry_safe(pg, next, &l, lru) {
                if (cpu_has_clflush)
                        flush_map(page_address(pg));
                __free_page(pg);
        }
}

#ifdef CONFIG_DEBUG_PAGEALLOC
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        if (!enable)
                debug_check_no_locks_freed(page_address(page),
                                           numpages * PAGE_SIZE);

        /* The return value is ignored - the calls cannot fail,
         * large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
        /* We should perform an IPI and flush all TLBs,
         * but that can deadlock; flush only the current CPU instead.
         */
        __flush_tlb_all();
}
#endif

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);