#include <linux/mm.h>
#include <linux/gfp.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
#include <asm/fixmap.h>

#define PGALLOC_GFP GFP_KERNEL | __GFP_NOTRACK | __GFP_REPEAT | __GFP_ZERO

#ifdef CONFIG_HIGHPTE
#define PGALLOC_USER_GFP __GFP_HIGHMEM
#else
#define PGALLOC_USER_GFP 0
#endif

gfp_t __userpte_alloc_gfp = PGALLOC_GFP | PGALLOC_USER_GFP;

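/*
 * Kernel pagetable pages are always allocated from the direct-mapped
 * (low) region, since the kernel addresses them through their linear
 * address.  User pte pages may additionally come from highmem when
 * CONFIG_HIGHPTE is set; they are then mapped temporarily (e.g. via
 * kmap_atomic) whenever the kernel needs to touch them.
 */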
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(PGALLOC_GFP);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

	pte = alloc_pages(__userpte_alloc_gfp, 0);
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

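/*
 * "userpte=" is an early boot parameter; for example, booting with
 * "userpte=nohigh" clears __GFP_HIGHMEM from __userpte_alloc_gfp so
 * that user pagetable pages are kept out of highmem.
 */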
static int __init setup_userpte(char *arg)
{
	if (!arg)
		return -EINVAL;

	/*
	 * "userpte=nohigh" disables allocation of user pagetables in
	 * high memory.
	 */
	if (strcmp(arg, "nohigh") == 0)
		__userpte_alloc_gfp &= ~__GFP_HIGHMEM;
	else
		return -EINVAL;
	return 0;
}
early_param("userpte", setup_userpte);

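/*
 * The ___*_free_tlb() helpers hand pagetable pages back through the
 * mmu_gather batching machinery, which guarantees the TLB is flushed
 * before the pages are actually freed and reused.  The
 * paravirt_release_*() hooks let a hypervisor know a page will no
 * longer be used as a pagetable.
 */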
void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pte(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#if PAGETABLE_LEVELS > 2
void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pmd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#if PAGETABLE_LEVELS > 3
void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud)
{
	paravirt_release_pud(__pa(pud) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pud));
}
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

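/*
 * pgd_list links every pgd whose kernel mappings are not shared at the
 * pmd level, so that kernel mapping changes can be propagated to all
 * of them; see the longer comment further down.  Callers must hold
 * pgd_lock.
 */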
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD				\
	(SHARED_KERNEL_PMD ? KERNEL_PGD_BOUNDARY : PTRS_PER_PGD)

static void pgd_ctor(pgd_t *pgd)
{
	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD) ||
	    PAGETABLE_LEVELS == 4) {
		clone_pgd_range(pgd + KERNEL_PGD_BOUNDARY,
				swapper_pg_dir + KERNEL_PGD_BOUNDARY,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pmd_clone(__pa(pgd) >> PAGE_SHIFT,
					 __pa(swapper_pg_dir) >> PAGE_SHIFT,
					 KERNEL_PGD_BOUNDARY,
					 KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);
}

static void pgd_dtor(pgd_t *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared.  If PAE were not to share the pmd a similar
 * tactic would be needed.  This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */

#ifdef CONFIG_X86_PAE
/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update.  Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
#define PREALLOCATED_PMDS	UNSHARED_PTRS_PER_PGD

void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
#else  /* !CONFIG_X86_PAE */

/* No need to prepopulate any pagetable entries in non-PAE modes. */
#define PREALLOCATED_PMDS	0

#endif	/* CONFIG_X86_PAE */

static void free_pmds(pmd_t *pmds[])
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++)
		if (pmds[i])
			free_page((unsigned long)pmds[i]);
}

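/*
 * Allocate all of the pmd pages a new pgd will need up front, so that
 * failure is handled in one place: on any failure the whole set is
 * released (free_pmds() above skips slots that are still NULL).
 */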
static int preallocate_pmds(pmd_t *pmds[])
{
	int i;
	bool failed = false;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pmd_t *pmd = (pmd_t *)__get_free_page(PGALLOC_GFP);
		if (pmd == NULL)
			failed = true;
		pmds[i] = pmd;
	}

	if (failed) {
		free_pmds(pmds);
		return -ENOMEM;
	}

	return 0;
}

/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < PREALLOCATED_PMDS; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pmd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

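/*
 * Wire the preallocated pmds into a fresh pgd.  Entries at or above
 * KERNEL_PGD_BOUNDARY cover kernel address space, so their contents
 * are copied from swapper_pg_dir; the user entries start out empty
 * (the pmd pages were allocated zeroed, since PGALLOC_GFP includes
 * __GFP_ZERO).
 */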
static void pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd, pmd_t *pmds[])
{
	pud_t *pud;
	unsigned long addr;
	int i;

	if (PREALLOCATED_PMDS == 0) /* Work around gcc-3.4.x bug */
		return;

	pud = pud_offset(pgd, 0);

	for (addr = i = 0; i < PREALLOCATED_PMDS;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmds[i];

		if (i >= KERNEL_PGD_BOUNDARY)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}
}

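/*
 * pgd_alloc() ties the pieces above together.  Note the unwind order
 * on failure: each goto label below releases only what was set up
 * before the failing step, in reverse order.
 */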
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd;
	pmd_t *pmds[PREALLOCATED_PMDS];
	unsigned long flags;

	pgd = (pgd_t *)__get_free_page(PGALLOC_GFP);

	if (pgd == NULL)
		goto out;

	mm->pgd = pgd;

	if (preallocate_pmds(pmds) != 0)
		goto out_free_pgd;

	if (paravirt_pgd_alloc(mm) != 0)
		goto out_free_pmds;

	/*
	 * Make sure that pre-populating the pmds is atomic with
	 * respect to anything walking the pgd_list, so that they
	 * never see a partially populated pgd.
	 */
	spin_lock_irqsave(&pgd_lock, flags);

	pgd_ctor(pgd);
	pgd_prepopulate_pmd(mm, pgd, pmds);

	spin_unlock_irqrestore(&pgd_lock, flags);

	return pgd;

out_free_pmds:
	free_pmds(pmds);
out_free_pgd:
	free_page((unsigned long)pgd);
out:
	return NULL;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	paravirt_pgd_free(mm, pgd);
	free_page((unsigned long)pgd);
}

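/*
 * Called by the generic mm code to upgrade the access flags of an
 * already-present pte (dirty/accessed/writable) in response to a
 * fault; the pte is updated in place and the stale TLB entry flushed.
 * The return value tells the caller whether anything actually changed.
 */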
int ptep_set_access_flags(struct vm_area_struct *vma,
			  unsigned long address, pte_t *ptep,
			  pte_t entry, int dirty)
{
	int changed = !pte_same(*ptep, entry);

	if (changed && dirty) {
		*ptep = entry;
		pte_update_defer(vma->vm_mm, address, ptep);
		flush_tlb_page(vma, address);
	}

	return changed;
}

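/*
 * The accessed bit is cleared with an atomic test_and_clear_bit() so
 * that the update cannot race with the CPU setting other bits (e.g.
 * dirty) in the same pte word.
 */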
int ptep_test_and_clear_young(struct vm_area_struct *vma,
			      unsigned long addr, pte_t *ptep)
{
	int ret = 0;

	if (pte_young(*ptep))
		ret = test_and_clear_bit(_PAGE_BIT_ACCESSED,
					 (unsigned long *) &ptep->pte);

	if (ret)
		pte_update(vma->vm_mm, addr, ptep);

	return ret;
}

int ptep_clear_flush_young(struct vm_area_struct *vma,
			   unsigned long address, pte_t *ptep)
{
	int young;

	young = ptep_test_and_clear_young(vma, address, ptep);
	if (young)
		flush_tlb_page(vma, address);

	return young;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void __init reserve_top_address(unsigned long reserve)
{
#ifdef CONFIG_X86_32
	BUG_ON(fixmaps_set > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
#endif
}

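/*
 * Fixmaps are compile-time fixed virtual address slots whose backing
 * mapping is installed at run time.  fixmaps_set counts how many have
 * been installed; reserve_top_address() above must run before any are
 * set, since moving __FIXADDR_TOP would invalidate them (hence the
 * BUG_ON there).
 */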
int fixmaps_set;

void __native_set_fixmap(enum fixed_addresses idx, pte_t pte)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_vaddr(address, pte);
	fixmaps_set++;
}

void native_set_fixmap(enum fixed_addresses idx, phys_addr_t phys,
		       pgprot_t flags)
{
	__native_set_fixmap(idx, pfn_pte(phys >> PAGE_SHIFT, flags));
}