/*
 * Copyright 2002 Andi Kleen, SuSE Labs.
 * Thanks to Ben LaHaise for precious feedback.
 */

#include <linux/config.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <asm/uaccess.h>
#include <asm/processor.h>
#include <asm/tlbflush.h>

static DEFINE_SPINLOCK(cpa_lock);
static struct list_head df_list = LIST_HEAD_INIT(df_list);

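/*
 * Walk the kernel page tables and return the pte mapping @address, or
 * the pmd cast to a pte when the address is covered by a 2/4MB large
 * page.  Returns NULL if no mapping exists.
 */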
pte_t *lookup_address(unsigned long address)
{
        pgd_t *pgd = pgd_offset_k(address);
        pud_t *pud;
        pmd_t *pmd;
        if (pgd_none(*pgd))
                return NULL;
        pud = pud_offset(pgd, address);
        if (pud_none(*pud))
                return NULL;
        pmd = pmd_offset(pud, address);
        if (pmd_none(*pmd))
                return NULL;
        if (pmd_large(*pmd))
                return (pte_t *)pmd;
        return pte_offset_kernel(pmd, address);
}

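/*
 * Allocate a page-table page and fill it with 4K ptes replicating the
 * large page mapping, giving @prot to the pte that covers @address and
 * PAGE_KERNEL to the rest.  cpa_lock is dropped around the allocation
 * because alloc_pages(GFP_KERNEL, ...) may sleep.
 */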
static struct page *split_large_page(unsigned long address, pgprot_t prot)
{
        int i;
        unsigned long addr;
        struct page *base;
        pte_t *pbase;

        spin_unlock_irq(&cpa_lock);
        base = alloc_pages(GFP_KERNEL, 0);
        spin_lock_irq(&cpa_lock);
        if (!base)
                return NULL;

        address = __pa(address);
        addr = address & LARGE_PAGE_MASK;
        pbase = (pte_t *)page_address(base);
        for (i = 0; i < PTRS_PER_PTE; i++, addr += PAGE_SIZE) {
                pbase[i] = pfn_pte(addr >> PAGE_SHIFT,
                                   addr == address ? prot : PAGE_KERNEL);
        }
        return base;
}

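/*
 * Runs on each CPU via on_each_cpu(): write back and invalidate the
 * caches, then flush the TLB.
 */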
static void flush_kernel_map(void *dummy)
{
        /* Could use CLFLUSH here if the CPU supports it (Hammer, P4) */
        if (boot_cpu_data.x86_model >= 4)
                asm volatile("wbinvd" ::: "memory");
        /*
         * Flush everything to work around errata in early Athlons
         * regarding large-page flushing.
         */
        __flush_tlb_all();
}

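/*
 * Update a kernel pmd entry.  init_mm is changed first; with PAE the
 * kernel pmds are shared, so that is enough.  On non-PAE
 * (PTRS_PER_PMD == 1) the pmd is folded into the pgd, of which every
 * process carries its own copy of the kernel entries, so the change
 * must also be propagated to every pgd on pgd_list.
 */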
static void set_pmd_pte(pte_t *kpte, unsigned long address, pte_t pte)
{
        struct page *page;
        unsigned long flags;

        set_pte_atomic(kpte, pte);      /* change init_mm */
        if (PTRS_PER_PMD > 1)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        for (page = pgd_list; page; page = (struct page *)page->index) {
                pgd_t *pgd;
                pud_t *pud;
                pmd_t *pmd;
                pgd = (pgd_t *)page_address(page) + pgd_index(address);
                pud = pud_offset(pgd, address);
                pmd = pmd_offset(pud, address);
                set_pte_atomic((pte_t *)pmd, pte);
        }
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * No more special protections in this 2/4MB area - revert to a
 * large page again.
 */
static inline void revert_page(struct page *kpte_page, unsigned long address)
{
        pte_t *linear = (pte_t *)
                pmd_offset(pud_offset(pgd_offset_k(address), address), address);
        set_pmd_pte(linear, address,
                    pfn_pte((__pa(address) & LARGE_PAGE_MASK) >> PAGE_SHIFT,
                            PAGE_KERNEL_LARGE));
}

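/*
 * Apply @prot to the linear-map pte of @page.  Setting a non-default
 * protection splits any covering large page; restoring PAGE_KERNEL
 * drops the reference on the split page table, and once the last
 * special mapping in a 2/4MB region is gone the large page is
 * regenerated via revert_page().  Called with cpa_lock held.
 */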
static int
__change_page_attr(struct page *page, pgprot_t prot)
{
        pte_t *kpte;
        unsigned long address;
        struct page *kpte_page;

        BUG_ON(PageHighMem(page));
        address = (unsigned long)page_address(page);

        kpte = lookup_address(address);
        if (!kpte)
                return -EINVAL;
        kpte_page = virt_to_page(kpte);
        if (pgprot_val(prot) != pgprot_val(PAGE_KERNEL)) {
                if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                        set_pte_atomic(kpte, mk_pte(page, prot));
                } else {
                        struct page *split = split_large_page(address, prot);
                        if (!split)
                                return -ENOMEM;
                        set_pmd_pte(kpte, address, mk_pte(split, PAGE_KERNEL));
                        kpte_page = split;
                }
                get_page(kpte_page);
        } else if ((pte_val(*kpte) & _PAGE_PSE) == 0) {
                set_pte_atomic(kpte, mk_pte(page, PAGE_KERNEL));
                __put_page(kpte_page);
        } else
                BUG();

        /*
         * If the pte was reserved, it was created at boot time (not via
         * split_large_page), so we must not replace it with a large page.
         */
        if (!PageReserved(kpte_page)) {
                /*
                 * A zero page count here would mean a memory leak and a
                 * potentially failed 2M page regeneration.
                 */
                BUG_ON(!page_count(kpte_page));

                if (cpu_has_pse && (page_count(kpte_page) == 1)) {
                        list_add(&kpte_page->lru, &df_list);
                        revert_page(kpte_page, address);
                }
        }
        return 0;
}

static inline void flush_map(void)
{
        on_each_cpu(flush_kernel_map, NULL, 1, 1);
}

/*
 * Change the page attributes of a page in the linear mapping.
 *
 * This should be used when a page is mapped with a different caching policy
 * than write-back somewhere - some CPUs do not like it when mappings with
 * different caching policies exist.  This changes the page attributes of
 * the kernel linear mapping too.
 *
 * The caller needs to ensure that there are no conflicting mappings
 * elsewhere; this function only deals with the kernel linear map.
 *
 * The caller must call global_flush_tlb() after this.
 */
int change_page_attr(struct page *page, int numpages, pgprot_t prot)
{
        int err = 0;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&cpa_lock, flags);
        for (i = 0; i < numpages; i++, page++) {
                err = __change_page_attr(page, prot);
                if (err)
                        break;
        }
        spin_unlock_irqrestore(&cpa_lock, flags);
        return err;
}

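/*
 * Flush the caches and TLBs on all CPUs, then free any page-table pages
 * that became unused when large pages were regenerated.  The deferred
 * list keeps those pages alive until after the flush, so no CPU can
 * still hold a stale translation through them.
 */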
void global_flush_tlb(void)
{
        LIST_HEAD(l);
        struct page *pg, *next;

        BUG_ON(irqs_disabled());

        spin_lock_irq(&cpa_lock);
        list_splice_init(&df_list, &l);
        spin_unlock_irq(&cpa_lock);
        flush_map();
        list_for_each_entry_safe(pg, next, &l, lru)
                __free_page(pg);
}
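
/*
 * Example usage (an illustrative sketch, not code from this file; "buf"
 * is a hypothetical lowmem kernel buffer):
 *
 *      struct page *pg = virt_to_page(buf);
 *
 *      if (!change_page_attr(pg, 1, PAGE_KERNEL_NOCACHE))
 *              global_flush_tlb();
 *      ...
 *      change_page_attr(pg, 1, PAGE_KERNEL);
 *      global_flush_tlb();
 */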

#ifdef CONFIG_DEBUG_PAGEALLOC
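/*
 * Used by the page allocator's debugging code to map and unmap pages
 * in the linear mapping, so that stray accesses to freed pages fault
 * through the direct map.
 */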
void kernel_map_pages(struct page *page, int numpages, int enable)
{
        if (PageHighMem(page))
                return;
        /*
         * The return value is ignored - the calls cannot fail because
         * large pages are disabled at boot time.
         */
        change_page_attr(page, numpages, enable ? PAGE_KERNEL : __pgprot(0));
        /*
         * We should send an IPI and flush all TLBs, but that can
         * deadlock here, so only flush the current CPU's TLB.
         */
        __flush_tlb_all();
}
#endif

EXPORT_SYMBOL(change_page_attr);
EXPORT_SYMBOL(global_flush_tlb);