/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_X86_PGALLOC_H
#define _ASM_X86_PGALLOC_H

#include <linux/threads.h>
#include <linux/mm.h>		/* for struct page */
#include <linux/pagemap.h>

static inline int __paravirt_pgd_alloc(struct mm_struct *mm) { return 0; }

#ifdef CONFIG_PARAVIRT
#include <asm/paravirt.h>
#else
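/*
 * Without CONFIG_PARAVIRT the page-table hooks are no-ops, so the
 * populate/free helpers below boil down to plain set_*() writes and
 * free_page() calls.
 */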
#define paravirt_pgd_alloc(mm)	__paravirt_pgd_alloc(mm)
static inline void paravirt_pgd_free(struct mm_struct *mm, pgd_t *pgd) {}
static inline void paravirt_alloc_pte(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_pmd_clone(unsigned long pfn, unsigned long clonepfn,
					    unsigned long start, unsigned long count) {}
static inline void paravirt_alloc_pud(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_alloc_p4d(struct mm_struct *mm, unsigned long pfn) {}
static inline void paravirt_release_pte(unsigned long pfn) {}
static inline void paravirt_release_pmd(unsigned long pfn) {}
static inline void paravirt_release_pud(unsigned long pfn) {}
static inline void paravirt_release_p4d(unsigned long pfn) {}
#endif

/*
 * Flags to use when allocating a user page table page.
 */
extern gfp_t __userpte_alloc_gfp;

#ifdef CONFIG_PAGE_TABLE_ISOLATION
/*
 * Instead of one PGD, we acquire two PGDs.  Being order-1, it is
 * both 8k in size and 8k-aligned.  That lets us just flip bit 12
 * in a pointer to swap between the two 4k halves.
 */
#define PGD_ALLOCATION_ORDER 1
#else
#define PGD_ALLOCATION_ORDER 0
#endif

/*
 * Allocate and free page tables.
 */
extern pgd_t *pgd_alloc(struct mm_struct *);
extern void pgd_free(struct mm_struct *mm, pgd_t *pgd);

extern pte_t *pte_alloc_one_kernel(struct mm_struct *, unsigned long);
extern pgtable_t pte_alloc_one(struct mm_struct *, unsigned long);

/* Should really implement gc for free page table pages. This could be
   done with a reference count in struct page. */

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	BUG_ON((unsigned long)pte & (PAGE_SIZE-1));
	free_page((unsigned long)pte);
}

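/*
 * pte_free() is for user page-table pages from pte_alloc_one(); it must
 * undo the pgtable_page_ctor() done at allocation time before handing
 * the page back to the allocator.
 */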
static inline void pte_free(struct mm_struct *mm, struct page *pte)
{
	pgtable_page_dtor(pte);
	__free_page(pte);
}

extern void ___pte_free_tlb(struct mmu_gather *tlb, struct page *pte);

static inline void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte,
				  unsigned long address)
{
	___pte_free_tlb(tlb, pte);
}

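/*
 * Hook a freshly allocated PTE page into a PMD entry.  The paravirt
 * layer is notified of the new page-table page first, so a hypervisor
 * (e.g. Xen) can pin it before it becomes reachable from the PMD.
 */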
static inline void pmd_populate_kernel(struct mm_struct *mm,
				       pmd_t *pmd, pte_t *pte)
{
	paravirt_alloc_pte(mm, __pa(pte) >> PAGE_SHIFT);
	set_pmd(pmd, __pmd(__pa(pte) | _PAGE_TABLE));
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
				struct page *pte)
{
	unsigned long pfn = page_to_pfn(pte);

	paravirt_alloc_pte(mm, pfn);
	set_pmd(pmd, __pmd(((pteval_t)pfn << PAGE_SHIFT) | _PAGE_TABLE));
}

#define pmd_pgtable(pmd) pmd_page(pmd)

#if CONFIG_PGTABLE_LEVELS > 2
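/*
 * Page-table pages allocated for user mms are charged to the memory
 * cgroup (GFP_KERNEL_ACCOUNT); init_mm's kernel page tables are not.
 * pgtable_pmd_page_ctor() sets up the split PMD lock and can fail.
 */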
static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	struct page *page;
	gfp_t gfp = GFP_KERNEL_ACCOUNT | __GFP_ZERO;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	page = alloc_pages(gfp, 0);
	if (!page)
		return NULL;
	if (!pgtable_pmd_page_ctor(page)) {
		__free_pages(page, 0);
		return NULL;
	}
	return (pmd_t *)page_address(page);
}

static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	BUG_ON((unsigned long)pmd & (PAGE_SIZE-1));
	pgtable_pmd_page_dtor(virt_to_page(pmd));
	free_page((unsigned long)pmd);
}

extern void ___pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd);

static inline void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd,
				  unsigned long address)
{
	___pmd_free_tlb(tlb, pmd);
}

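/*
 * On PAE the top-level PDPT entries are only (re)read by the CPU on a
 * CR3 write, so pud_populate() needs extra TLB flushing and is
 * implemented out of line.
 */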
#ifdef CONFIG_X86_PAE
extern void pud_populate(struct mm_struct *mm, pud_t *pudp, pmd_t *pmd);
#else	/* !CONFIG_X86_PAE */
static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	paravirt_alloc_pmd(mm, __pa(pmd) >> PAGE_SHIFT);
	set_pud(pud, __pud(_PAGE_TABLE | __pa(pmd)));
}
#endif	/* CONFIG_X86_PAE */

#if CONFIG_PGTABLE_LEVELS > 3
static inline void p4d_populate(struct mm_struct *mm, p4d_t *p4d, pud_t *pud)
{
	paravirt_alloc_pud(mm, __pa(pud) >> PAGE_SHIFT);
	set_p4d(p4d, __p4d(_PAGE_TABLE | __pa(pud)));
}

static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (pud_t *)get_zeroed_page(gfp);
}

static inline void pud_free(struct mm_struct *mm, pud_t *pud)
{
	BUG_ON((unsigned long)pud & (PAGE_SIZE-1));
	free_page((unsigned long)pud);
}

extern void ___pud_free_tlb(struct mmu_gather *tlb, pud_t *pud);

static inline void __pud_free_tlb(struct mmu_gather *tlb, pud_t *pud,
				  unsigned long address)
{
	___pud_free_tlb(tlb, pud);
}

#if CONFIG_PGTABLE_LEVELS > 4
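/*
 * With 4-level paging the p4d level is folded into the pgd, so there is
 * no separate p4d page to hook up or free; the pgtable_l5_enabled()
 * checks below make these helpers no-ops in that case.
 */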
static inline void pgd_populate(struct mm_struct *mm, pgd_t *pgd, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;
	paravirt_alloc_p4d(mm, __pa(p4d) >> PAGE_SHIFT);
	set_pgd(pgd, __pgd(_PAGE_TABLE | __pa(p4d)));
}

static inline p4d_t *p4d_alloc_one(struct mm_struct *mm, unsigned long addr)
{
	gfp_t gfp = GFP_KERNEL_ACCOUNT;

	if (mm == &init_mm)
		gfp &= ~__GFP_ACCOUNT;
	return (p4d_t *)get_zeroed_page(gfp);
}
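/*
 * When 5-level paging is disabled at runtime, the "p4d" is really the
 * pgd page itself, so there is nothing separate to free here.
 */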
static inline void p4d_free(struct mm_struct *mm, p4d_t *p4d)
{
	if (!pgtable_l5_enabled())
		return;

	BUG_ON((unsigned long)p4d & (PAGE_SIZE-1));
	free_page((unsigned long)p4d);
}

extern void ___p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d);

static inline void __p4d_free_tlb(struct mmu_gather *tlb, p4d_t *p4d,
				  unsigned long address)
{
	if (pgtable_l5_enabled())
		___p4d_free_tlb(tlb, p4d);
}

#endif	/* CONFIG_PGTABLE_LEVELS > 4 */
#endif	/* CONFIG_PGTABLE_LEVELS > 3 */
#endif	/* CONFIG_PGTABLE_LEVELS > 2 */

#endif	/* _ASM_X86_PGALLOC_H */