blob: 2f585054c63c897511c0aa44601bba37ee619c88 [file] [log] [blame]
H. Peter Anvin1965aae2008-10-22 22:26:29 -07001#ifndef _ASM_X86_PGALLOC_H
2#define _ASM_X86_PGALLOC_H
Jeremy Fitzhardinge4f76cd32008-03-17 16:36:55 -07003
4#include <linux/threads.h>
5#include <linux/mm.h> /* for struct page */
6#include <linux/pagemap.h>
7
/* Native fallback for the pgd allocation hook: nothing to do, report success. */
static inline int __paravirt_pgd_alloc(struct mm_struct *mm)
{
	return 0;
}
9
Jeremy Fitzhardinge1d262d32008-03-17 16:36:56 -070010#ifdef CONFIG_PARAVIRT
11#include <asm/paravirt.h>
12#else
Jeremy Fitzhardingeeba00452008-06-25 00:19:12 -040013#define paravirt_pgd_alloc(mm) __paravirt_pgd_alloc(mm)
14static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
Jeremy Fitzhardinge286cd492008-03-17 16:37:06 -070015static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
16static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
17static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
18 unsigned long start, unsigned long count) {}
19static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
20static inline void paravirt_release_pte(unsigned long pfn) {}
21static inline void paravirt_release_pmd(unsigned long pfn) {}
22static inline void paravirt_release_pud(unsigned long pfn) {}
Jeremy Fitzhardinge1d262d32008-03-17 16:36:56 -070023#endif
24
/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

/*
 * pte_alloc_one_kernel() returns the pte page's kernel virtual address;
 * pte_alloc_one() returns a pgtable_t, released with pte_free() below.
 * Both are defined out of line.
 */
extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);
38
Jeremy Fitzhardinge397f6872008-03-17 16:36:57 -070039/* Should really implement gc for free page table pages. This could be
40 done with a reference count in struct page. */
41
42static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
43{
44 BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
45 free_page((unsigned long)pte);
46}
47
/*
 * Release a user pte page.  Runs the pgtable page destructor before
 * handing the page back.  NOTE(review): the matching ctor is presumably
 * done in the out-of-line pte_alloc_one() — confirm in arch/x86/mm.
 */
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}
53
/* Out-of-line worker that disposes of the pte page for the gather. */
extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

/*
 * Free a pte page during TLB teardown; forwards to ___pte_free_tlb().
 * @address is unused on x86 (kept for the generic mmu_gather API —
 * TODO confirm against include/asm-generic/tlb.h).
 */
static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}
Jeremy Fitzhardinge397f6872008-03-17 16:36:57 -070061
/*
 * Point @pmd at a kernel pte page (given by kernel virtual address).
 * The paravirt layer is told the pte page's pfn before set_pmd() makes
 * the entry (physical address | _PAGE_TABLE) visible.
 */
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}
68
/*
 * Point @pmd at a user pte page (given as a struct page).  As in
 * pmd_populate_kernel(), paravirt is notified of the pfn before the
 * entry becomes visible via set_pmd().
 */
static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	/* pfn << PAGE_SHIFT widened through pteval_t to build the entry. */
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}
77
/* The pgtable_t behind a pmd entry is its pte page, via pmd_page(). */
#define pmd_pgtable(pmd) pmd_page(pmd)
79
#if CONFIG_PGTABLE_LEVELS > 2
/*
 * Allocate a zeroed pmd page for @mm and run the pmd pgtable
 * constructor on it.  Allocations for regular mms are charged to the
 * memcg (GFP_KERNEL_ACCOUNT); init_mm allocations are not.  Returns the
 * page's kernel virtual address, or NULL on allocation/ctor failure.
 */
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}
97
/*
 * Release a pmd page from pmd_alloc_one().  The pgtable destructor must
 * run to undo pgtable_pmd_page_ctor() before the page is freed.
 */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));	/* must be page-aligned */
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}
104
/* Out-of-line worker that disposes of the pmd page for the gather. */
extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

/*
 * Free a pmd page during TLB teardown; forwards to ___pmd_free_tlb().
 * @address is unused on x86 (generic mmu_gather API).
 */
static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}
Jeremy Fitzhardinge170fdff2008-03-17 16:36:58 -0700112
#ifdef CONFIG_X86_PAE
/*
 * PAE: pud_populate() is defined out of line (not in this header).
 * NOTE(review): presumably PAE pmd installation needs extra care for
 * the 64-bit entry — confirm in arch/x86/mm.
 */
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
/*
 * Point @pud at a pmd page: paravirt is notified of the pmd's pfn
 * before set_pud() makes the entry visible.
 */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */
122
#if CONFIG_PGTABLE_LEVELS > 3
/*
 * Point @p4d at a pud page: paravirt is notified of the pud's pfn
 * before set_p4d() makes the entry visible.
 */
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}
129
130static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
131{
Vladimir Davydov3e79ec72016-07-26 15:24:30 -0700132 gfp_t gfp = GFP_KERNEL_ACCOUNT;
133
134 if (mm == &init_mm)
135 gfp &= ~__GFP_ACCOUNT;
136 return (pud_t *)get_zeroed_page(gfp);
Jeremy Fitzhardinge5a5f8f42008-03-17 16:36:59 -0700137}
138
139static inline void pud_free(struct mm_struct *mm, pud_t *pud)
140{
141 BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
142 free_page((unsigned long)pud);
143}
144
/* Out-of-line worker that disposes of the pud page for the gather. */
extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

/*
 * Free a pud page during TLB teardown; forwards to ___pud_free_tlb().
 * @address is unused on x86 (generic mmu_gather API).
 */
static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}
152
#if CONFIG_PGTABLE_LEVELS > 4
/*
 * 5-level paging: point @pgd at a p4d page.  Paravirt is notified of
 * the p4d's pfn before set_pgd() makes the entry visible.
 */
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}
159
160static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
161{
162 gfp_t gfp = GFP_KERNEL_ACCOUNT;
163
164 if (mm == &init_mm)
165 gfp &= ~__GFP_ACCOUNT;
166 return (p4d_t *)get_zeroed_page(gfp);
167}
168
169static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
170{
171 BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
172 free_page((unsigned long)p4d);
173}
174
/* Out-of-line worker that disposes of the p4d page for the gather. */
extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

/*
 * Free a p4d page during TLB teardown; forwards to ___p4d_free_tlb().
 * @address is unused on x86 (generic mmu_gather API).
 */
static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	___p4d_free_tlb(tlb, p4d);
}
182
183#endif /* CONFIG_PGTABLE_LEVELS > 4 */
Kirill A. Shutemov98233362015-04-14 15:46:14 -0700184#endif /* CONFIG_PGTABLE_LEVELS > 3 */
185#endif /* CONFIG_PGTABLE_LEVELS > 2 */
Jeremy Fitzhardinge4f76cd32008-03-17 16:36:55 -0700186
H. Peter Anvin1965aae2008-10-22 22:26:29 -0700187#endif /* _ASM_X86_PGALLOC_H */