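/*
 * x86 pagetable allocation and teardown: pte/pmd/pud page alloc/free
 * helpers, pgd construction and destruction, and the pte accessed/dirty
 * bit primitives used by the generic mm code.
 */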
#include <linux/mm.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>

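/*
 * PTE pages for kernel mappings always come from lowmem.  __GFP_ZERO
 * hands back a ready-to-use empty page table; __GFP_REPEAT asks the
 * allocator to try harder before failing.
 */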
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

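/*
 * PTE pages for user mappings may live in highmem when CONFIG_HIGHPTE
 * is enabled (they are then reached via kmap_atomic() when needed).
 * pgtable_page_ctor() sets the page up for use as a pagetable page,
 * e.g. initializing the split pte lock.
 */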
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

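/*
 * Tear down a PTE page via the mmu_gather batching machinery: undo the
 * ctor, tell any paravirt backend the page is no longer a pagetable,
 * then queue the page so it is only freed after the TLB flush.
 */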
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

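/*
 * The pmd/pud variants only exist when the corresponding pagetable
 * level is not folded away (PAGETABLE_LEVELS > 2 for pmd, > 3 for pud).
 */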
#if PAGETABLE_LEVELS > 2
void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

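/*
 * pgds needing kernel-mapping synchronization are kept on pgd_list,
 * chained through their struct page's lru member.  The list is
 * protected by pgd_lock.
 */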
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

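/*
 * On 64-bit, each new pgd gets its own copy of the kernel half from
 * init_level4_pgt, and is put on pgd_list so that later changes to the
 * kernel mappings can be propagated into it.
 */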
#ifdef CONFIG_X86_64
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	unsigned boundary;
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	unsigned long flags;

	if (!pgd)
		return NULL;
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	/*
	 * Copy kernel pointers in from init.
	 * Could keep a freelist or slab cache of those because the kernel
	 * part never changes.
	 */
	boundary = pgd_index(__PAGE_OFFSET);
	memset(pgd, 0, boundary * sizeof(pgd_t));
	memcpy(pgd + boundary,
	       init_level4_pgt + boundary,
	       (PTRS_PER_PGD - boundary) * sizeof(pgd_t));
	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	unsigned long flags;

	BUG_ON((unsigned long)pgd & (PAGE_SIZE-1));
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	free_page((unsigned long)pgd);
}
#else
/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared.  If PAE were not to share the pmd a similar
 * tactic would be needed.  This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

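/*
 * Runs on every freshly allocated 32-bit pgd: clear the user half and,
 * when the kernel mappings are shared at a lower level, copy the kernel
 * entries straight from swapper_pg_dir.
 */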
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, KERNEL_PGD_BOUNDARY*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

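/*
 * Undo pgd_ctor: take the pgd off pgd_list again.  With
 * SHARED_KERNEL_PMD the pgd was never listed, so there is nothing to do.
 */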
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}

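/*
 * Install a pmd page into the (folded) pud slot of a PAE pgd.  The
 * explicit cr3 reload below deals with the PDPT caching behaviour
 * described in the Intel note cited in the comment.
 */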
void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */

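/*
 * 32-bit pgd_alloc.  mm->pgd is set before the ctor/prepopulate calls
 * run because the paravirt alloc hooks they invoke want to know which
 * mm the new pagetables belong to.  Typically reached from
 * mm_alloc_pgd() in the fork/exec path.
 */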
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pmd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}
#endif	/* CONFIG_X86_64 */

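/*
 * Used by the generic fault path to update accessed/dirty bits or relax
 * protections on a pte.  Only a "dirty" (permission-widening) change
 * writes the pte and flushes; a stale-but-stricter TLB entry simply
 * faults again and is fixed up then.
 */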
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

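/*
 * Page aging: atomically clear the accessed bit and report whether it
 * was set.  pte_update() gives paravirt backends a chance to see the
 * change.
 */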
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

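/*
 * As above, but also flush the TLB entry so future references set the
 * accessed bit again and are counted.
 */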
int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}