#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

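/*
 * The paravirt_alloc_pte/pmd and paravirt_release_pte/pmd calls below
 * are hooks that tell a paravirtualized hypervisor (e.g. Xen) when a
 * page starts or stops being used as a page table, since such pages
 * need special treatment there (Xen maps page-table pages read-only).
 * Without CONFIG_PARAVIRT they compile to nothing.
 */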
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_alloc_pte(mm, pfn) do { } while (0)
#define paravirt_alloc_pmd(mm, pfn) do { } while (0)
#define paravirt_alloc_pmd_clone(pfn, clonepfn, start, count) do { } while (0)
#define paravirt_release_pte(pfn) do { } while (0)
#define paravirt_release_pmd(pfn) do { } while (0)
#endif

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);

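/*
 * Rough usage sketch (not part of this header): generic mm code such as
 * __pte_alloc() in mm/memory.c allocates a pte page here and then hangs
 * it off the pmd, approximately:
 *
 *	pgtable_t new = pte_alloc_one(mm, address);
 *	if (!new)
 *		return -ENOMEM;
 *	...
 *	pmd_populate(mm, pmd, new);
 */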
/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	__free_page(pte);
}

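/*
 * Free a pte page as part of an mmu_gather: the page is queued and only
 * really freed once the relevant TLB entries have been flushed.
 */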
extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
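	/*
	 * Cast to pteval_t before shifting so the physical address is
	 * not truncated on 32-bit PAE, where pteval_t is 64-bit but
	 * unsigned long is not.
	 */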
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

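/*
 * pgtable_t is a struct page * on x86, so the pte page behind a pmd
 * entry is simply its pmd_page().
 */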
#define pmd_pgtable(pmd) pmd_page(pmd)

#if PAGETABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

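/*
 * On PAE the four top-level entries are cached by the CPU when %cr3 is
 * loaded, so installing a new pmd takes more than a simple store; the
 * PAE version of pud_populate is therefore out of line.
 */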
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

#if PAGETABLE_LEVELS > 3
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, pud_t *pud)
{
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(pud)));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pud_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

extern void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);
#endif	/* PAGETABLE_LEVELS > 3 */
#endif	/* PAGETABLE_LEVELS > 2 */

#endif /* _ASM_X86_PGALLOC_H */