#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/tlb.h>

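/*
 * Allocate a zeroed page to hold a kernel pte page table.  Kernel
 * pte pages always come from lowmem, since the kernel must be able
 * to address them directly.
 */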
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

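/*
 * Allocate a pte page for userspace.  With CONFIG_HIGHPTE the page
 * may live in highmem, so callers must map it (e.g. via kmap_atomic)
 * before touching it; pgtable_page_ctor() sets up the struct page
 * state (such as the split page-table lock) expected by the mm core.
 */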
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

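/*
 * Page table pages are freed through the mmu_gather machinery, so a
 * page is not reused until the relevant TLB entries have been
 * flushed on all CPUs.
 */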
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

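/*
 * pmd and pud pages only exist with three- and four-level pagetables
 * respectively; each release is reported to the paravirt layer
 * before the page goes back through the TLB batching code.
 */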
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

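/*
 * pgd_list links every live pgd through its struct page, so the
 * kernel can walk all pagetables when top-level kernel mappings
 * change.  Callers must hold pgd_lock.
 */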
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#ifdef CONFIG_X86_64
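/*
 * On 64-bit there is no shared kernel pmd, so each new pgd gets the
 * kernel half copied from init_level4_pgt at allocation time.
 */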
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	unsigned long flags;

	if (!pgd)
		return NULL;
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;

	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	free_page((unsigned long)pgd);
}
#else
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

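/*
 * pgd_ctor/pgd_dtor initialize and tear down a 32-bit pgd: clear
 * the user entries, share or clone the kernel mappings, and keep
 * the pgd on pgd_list whenever kernel mapping updates must be
 * propagated by hand (i.e. when the kernel pmd is not shared).
 */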
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 USER_PTRS_PER_PGD,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}

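/*
 * In PAE mode the pud entries are the four PDPT slots, which the
 * CPU treats specially: set_pud() installs the pmd page, and the
 * cr3 reload below makes sure the processor picks up the new value.
 */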
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */

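/*
 * Allocate a 32-bit pgd: the constructor shares or clones the kernel
 * mappings, then (for PAE) the pmds are preallocated.  Failure at
 * any point unwinds via pgd_dtor and frees the page.
 */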
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* mm->pgd must be set up front so that the pmd allocation and
	   paravirt hooks below can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
#endif	/* CONFIG_X86_64 */