#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

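/*
 * Allocate a zeroed page for a kernel pagetable.  Kernel PTE pages
 * are always lowmem, so a plain __get_free_page() suffices.
 */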
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

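/*
 * Allocate a PTE page for a user pagetable.  With CONFIG_HIGHPTE the
 * page may come from highmem; pgtable_page_ctor() initializes the
 * struct page for pagetable use (e.g. the split pte lock).
 */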
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}

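/*
 * Free a PTE page via the mmu_gather: undo the ctor, tell the
 * paravirt backend that the pfn no longer holds a pagetable, and
 * defer the actual freeing until after the TLB flush.
 */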
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pt(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

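/*
 * pmd pages (and, with 4-level pagetables, pud pages) go through the
 * same deferred path, so a pagetable page is never reused while stale
 * TLB entries might still reference it.
 */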
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
        tlb_remove_page(tlb, virt_to_page(pud));
}
#endif  /* PAGETABLE_LEVELS > 3 */
#endif  /* PAGETABLE_LEVELS > 2 */

#ifdef CONFIG_X86_64
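/*
 * Every pgd is kept on pgd_list so that updates to the kernel
 * mappings can be propagated to all pagetables; pgd_lock protects
 * the list.
 */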
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        unsigned long flags;

        spin_lock_irqsave(&pgd_lock, flags);
        list_add(&page->lru, &pgd_list);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        unsigned long flags;

        spin_lock_irqsave(&pgd_lock, flags);
        list_del(&page->lru);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

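/*
 * A 64-bit pgd is a single page: clear the user portion and copy the
 * kernel portion from init_level4_pgt.
 */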
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        unsigned boundary;
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);

        if (!pgd)
                return NULL;
        pgd_list_add(pgd);
        /*
         * Copy kernel pointers in from init.
         * Could keep a freelist or slab cache of those because the kernel
         * part never changes.
         */
        boundary = pgd_index(__PAGE_OFFSET);
        memset(pgd, 0, boundary * sizeof(pgd_t));
        memcpy(pgd + boundary,
               init_level4_pgt + boundary,
               (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
        return pgd;
}

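/* Unhook the pgd from pgd_list before handing its page back. */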
void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
        pgd_list_del(pgd);
        free_page((unsigned long)pgd);
}
#else  /* !CONFIG_X86_64 */
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

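/*
 * Initialize a new pgd: clear the user entries, then either copy the
 * kernel references from swapper_pg_dir (when the lower levels are
 * shared) or put the pgd on pgd_list so that kernel mapping updates
 * can be propagated to it explicitly.  pgd_lock is taken here, which
 * is why the list helpers above do no locking of their own.
 */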
static void pgd_ctor(void *p)
{
        pgd_t *pgd = p;
        unsigned long flags;

        /* Clear usermode parts of PGD */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

        spin_lock_irqsave(&pgd_lock, flags);

        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
                clone_pgd_range(pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
                paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                        USER_PTRS_PER_PGD,
                                        KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);

        spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        pud_t *pud;
        unsigned long addr;
        int i;

        pud = pud_offset(pgd, 0);
        for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmd_alloc_one(mm, addr);

                if (!pmd) {
                        pgd_mop_up_pmds(mm, pgd);
                        return 0;
                }

                if (i >= USER_PTRS_PER_PGD)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }

        return 1;
}

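/*
 * Install a pmd page in a PAE pagetable.  The pud level is folded
 * into the pgd on PAE, so the entry written here is really one of
 * the four PDPT entries.
 */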
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
        paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);

        /* Note: almost everything apart from _PAGE_PRESENT is
           reserved at the pmd (PDPT) level. */
        set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

        /*
         * According to Intel App note "TLBs, Paging-Structure Caches,
         * and Their Invalidation", April 2007, document 317080-001,
         * section 8.1: in PAE mode we explicitly have to flush the
         * TLB via cr3 if the top-level pgd is changed...
         */
        if (mm == current->active_mm)
                write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif  /* CONFIG_X86_PAE */

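/*
 * Allocate a 32-bit pgd.  The ctor fills in the kernel half, and on
 * PAE the top-level pmds are preallocated by pgd_prepopulate_pmd().
 */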
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        /* so that alloc_pd can use it */
        mm->pgd = pgd;
        if (pgd)
                pgd_ctor(pgd);

        if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
                pgd_dtor(pgd);
                free_page((unsigned long)pgd);
                pgd = NULL;
        }

        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        free_page((unsigned long)pgd);
}
#endif  /* CONFIG_X86_64 */