blob: 6bea6e5b5ee55044dd06bea1b55677bf9cba20bf [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _I386_PGALLOC_H
2#define _I386_PGALLOC_H
3
Linus Torvalds1da177e2005-04-16 15:20:36 -07004#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */
Ingo Molnar5aa05082008-01-31 22:05:48 +01006#include <linux/pagemap.h>
Jeremy Fitzhardingea5a19c62008-01-30 13:33:39 +01007#include <asm/tlb.h>
8#include <asm-generic/tlb.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -07009
Zachary Amsdenc119ecc2007-02-13 13:26:21 +010010#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h>
12#else
Jeremy Fitzhardingefdb4c332007-07-17 18:37:03 -070013#define paravirt_alloc_pt(mm, pfn) do { } while (0)
Jeremy Fitzhardinge6c435452008-01-30 13:33:39 +010014#define paravirt_alloc_pd(mm, pfn) do { } while (0)
Zachary Amsdenc119ecc2007-02-13 13:26:21 +010015#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
16#define paravirt_release_pt(pfn) do { } while (0)
17#define paravirt_release_pd(pfn) do { } while (0)
18#endif
19
/*
 * Install a kernel pte page into @pmd.
 *
 * The paravirt hook must run before the pmd entry becomes visible so a
 * hypervisor can pin/track the new page table page first; the set_pmd()
 * then publishes the entry with kernel page-table permissions
 * (_PAGE_TABLE).
 */
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	/* Notify paravirt of the pfn backing this pte page. */
	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
Linus Torvalds1da177e2005-04-16 15:20:36 -070026
Jeremy Fitzhardingea5a19c62008-01-30 13:33:39 +010027static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd, struct page *pte)
28{
29 unsigned long pfn = page_to_pfn(pte);
30
31 paravirt_alloc_pt(mm, pfn);
32 set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
33}
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080034#define pmd_pgtable(pmd) pmd_page(pmd)
Zachary Amsdenc119ecc2007-02-13 13:26:21 +010035
Linus Torvalds1da177e2005-04-16 15:20:36 -070036/*
37 * Allocate and free page tables.
38 */
39extern pgd_t *pgd_alloc(struct mm_struct *);
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -080040extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);
Linus Torvalds1da177e2005-04-16 15:20:36 -070041
42extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080043extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
Linus Torvalds1da177e2005-04-16 15:20:36 -070044
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -080045static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
Linus Torvalds1da177e2005-04-16 15:20:36 -070046{
47 free_page((unsigned long)pte);
48}
49
/*
 * Release a user pte page allocated with pte_alloc_one().
 *
 * The pgtable_page_dtor() call must precede the free: it undoes the
 * constructor state (e.g. the split-ptlock/page bookkeeping) set up at
 * allocation time, while the page is still valid.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
55
56
Ingo Molnar5aa05082008-01-31 22:05:48 +010057extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);
Linus Torvalds1da177e2005-04-16 15:20:36 -070058
59#ifdef CONFIG_X86_PAE
60/*
61 * In the PAE case we free the pmds as part of the pgd.
62 */
Jeremy Fitzhardingea5a19c62008-01-30 13:33:39 +010063static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
64{
Jeremy Fitzhardinge6194ba62008-01-30 13:34:11 +010065 return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
Jeremy Fitzhardingea5a19c62008-01-30 13:33:39 +010066}
67
/*
 * Free a pmd page allocated by pmd_alloc_one().
 *
 * A pmd must be a whole, page-aligned page; an unaligned pointer here
 * means corruption or a bogus caller, so BUG rather than corrupt the
 * page allocator.
 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));	/* must be page-aligned */
	free_page((unsigned long)pmd);
}
73
Ingo Molnar5aa05082008-01-31 22:05:48 +010074extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
Jeremy Fitzhardingea5a19c62008-01-30 13:33:39 +010075
/*
 * Install @pmd into the PAE top-level entry @pudp (a PDPT slot).
 *
 * Ordering matters: the paravirt hook runs before the entry is
 * published, and the cr3 reload must follow the set_pud() so the CPU
 * picks up the new PDPT contents.
 */
static inline void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd)
{
	/* Let a hypervisor pin/track the pmd page before it goes live. */
	paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);

	/* Note: almost everything apart from _PAGE_PRESENT is
	   reserved at the pmd (PDPT) level. */
	set_pud(pudp, __pud(__pa(pmd) | _PAGE_PRESENT));

	/*
	 * According to Intel App note "TLBs, Paging-Structure Caches,
	 * and Their Invalidation", April 2007, document 317080-001,
	 * section 8.1: in PAE mode we explicitly have to flush the
	 * TLB via cr3 if the top-level pgd is changed...
	 */
	/* Only reload cr3 if this mm is the one currently loaded. */
	if (mm == current->active_mm)
		write_cr3(read_cr3());
}
93#endif /* CONFIG_X86_PAE */
Linus Torvalds1da177e2005-04-16 15:20:36 -070094
Linus Torvalds1da177e2005-04-16 15:20:36 -070095#endif /* _I386_PGALLOC_H */