#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

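/*
 * Paravirt hooks: on a paravirtualized kernel (e.g. Xen) these notify the
 * backend when a page starts or stops being used as a page table.  Without
 * CONFIG_PARAVIRT they compile away to nothing.
 */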
#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
#define paravirt_alloc_pt(mm, pfn) do { } while (0)
#define paravirt_alloc_pd(mm, pfn) do { } while (0)
#define paravirt_alloc_pd_clone(pfn, clonepfn, start, count) do { } while (0)
#define paravirt_release_pt(pfn) do { } while (0)
#define paravirt_release_pd(pfn) do { } while (0)
#endif

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

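/*
 * pte pages are ordinary pages: freeing one just hands the page back to
 * the page allocator.  The BUG_ON catches callers passing a pointer that
 * is not page aligned.
 */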
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	__free_page(pte);
}

extern void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

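/*
 * Hook a pte page into a pmd entry.  The paravirt hook runs first so a
 * hypervisor backend can see the page become a page table before the pmd
 * entry points at it.  pmd_populate_kernel takes a kernel-virtual pte
 * pointer; pmd_populate takes the struct page of a user pte page.
 */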
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pt(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pt(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

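/*
 * pmd allocation only exists with more than two paging levels (PAE or
 * 64-bit); with two-level paging the pmd is folded into the pgd.
 */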
#if PAGETABLE_LEVELS > 2
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	return (pmd_t *)get_zeroed_page(GFP_KERNEL|__GFP_REPEAT);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	free_page((unsigned long)pmd);
}

extern void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);
#endif	/* PAGETABLE_LEVELS > 2 */

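/*
 * The remainder of the interface is word-size specific and is provided by
 * the 32-bit and 64-bit headers below.
 */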
#ifdef CONFIG_X86_32
# include "pgalloc_32.h"
#else
# include "pgalloc_64.h"
#endif

#endif /* _ASM_X86_PGALLOC_H */