#ifndef _ASM_POWERPC_PGALLOC_32_H
#define _ASM_POWERPC_PGALLOC_32_H

#include <linux/threads.h>
#include <linux/slab.h>

/*
 * Functions that deal with pagetables that could be at any level of
 * the table need to be passed an "index_size" so they know how to
 * handle allocation.  For PTE pages (which are linked to a struct
 * page for now, and drawn from the main get_free_pages() pool), the
 * allocation size will be (2^index_size * sizeof(pointer)) and
 * allocations are drawn from the kmem_cache in PGT_CACHE(index_size).
 *
 * The maximum index size needs to be big enough to allow any
 * pagetable sizes we need, but small enough to fit in the low bits of
 * any page table pointer.  In other words all pagetables, even tiny
 * ones, must be aligned to allow at least enough low 0 bits to
 * contain this value.  This value is also used as a mask, so it must
 * be one less than a power of two.
 */
#define MAX_PGTABLE_INDEX_SIZE	0xf
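/*
 * For example: with MAX_PGTABLE_INDEX_SIZE = 0xf every page table is
 * at least 16-byte aligned, leaving four free low bits in any table
 * pointer.  pgtable_free_tlb() below packs the index size into those
 * bits (ptr | index_size), and __tlb_remove_table() recovers the two
 * halves with (ptr & ~0xf) and (ptr & 0xf).
 */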

extern void __bad_pte(pmd_t *pmd);

extern struct kmem_cache *pgtable_cache[];
#define PGT_CACHE(shift) ({				\
			BUG_ON(!(shift));		\
			pgtable_cache[(shift) - 1];	\
		})

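/*
 * The PGD is allocated from the kmem_cache sized for
 * 2^PGD_INDEX_SIZE pointers, per the scheme described above.
 */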
static inline pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(PGT_CACHE(PGD_INDEX_SIZE), GFP_KERNEL);
}

static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(PGT_CACHE(PGD_INDEX_SIZE), pgd);
}

/*
 * We don't have any real pmd's, and this code never triggers because
 * the pgd will always be present.
 */
/* #define pmd_alloc_one(mm, address)	({ BUG(); ((pmd_t *)2); }) */
#define pmd_free(mm, x)			do { } while (0)
#define __pmd_free_tlb(tlb, x, a)	do { } while (0)
/* #define pgd_populate(mm, pmd, pte)	BUG() */

#ifndef CONFIG_BOOKE

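/*
 * On non-Book E parts the PMD entry holds the physical address of the
 * PTE page: __pa() for a kernel pointer, pfn << PAGE_SHIFT for a
 * struct page.
 */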
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd(__pa(pte) | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((page_to_pfn(pte_page) << PAGE_SHIFT) | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#else

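/*
 * Book E parts keep a virtual address in the PMD entry, so a
 * struct page must first be converted with lowmem_page_address().
 */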
static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmdp,
				       pte_t *pte)
{
	*pmdp = __pmd((unsigned long)pte | _PMD_PRESENT);
}

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmdp,
				pgtable_t pte_page)
{
	*pmdp = __pmd((unsigned long)lowmem_page_address(pte_page) | _PMD_PRESENT);
}

#define pmd_pgtable(pmd) pmd_page(pmd)
#endif

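/*
 * PTE pages are full pages: pte_alloc_one_kernel() returns the kernel
 * virtual address, pte_alloc_one() the struct page (pgtable_t).
 */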
extern pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long addr);
extern pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long addr);

static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}

static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}

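/*
 * Common free path: index_size 0 means a full PTE page from
 * get_free_pages(), anything else came from PGT_CACHE(index_size).
 */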
static inline void pgtable_free(void *table, unsigned index_size)
{
	if (!index_size) {
		free_page((unsigned long)table);
	} else {
		BUG_ON(index_size > MAX_PGTABLE_INDEX_SIZE);
		kmem_cache_free(PGT_CACHE(index_size), table);
	}
}

#define check_pgt_cache()	do { } while (0)

#ifdef CONFIG_SMP
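/*
 * Under SMP a table cannot be freed straight away: other CPUs may
 * still be walking it.  Encode the index size in the pointer's low
 * bits (see MAX_PGTABLE_INDEX_SIZE above) and let tlb_remove_table()
 * defer the actual free.
 */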
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	unsigned long pgf = (unsigned long)table;

	BUG_ON(shift > MAX_PGTABLE_INDEX_SIZE);
	pgf |= shift;
	tlb_remove_table(tlb, (void *)pgf);
}

static inline void __tlb_remove_table(void *_table)
{
	void *table = (void *)((unsigned long)_table & ~MAX_PGTABLE_INDEX_SIZE);
	unsigned shift = (unsigned long)_table & MAX_PGTABLE_INDEX_SIZE;

	pgtable_free(table, shift);
}
#else
static inline void pgtable_free_tlb(struct mmu_gather *tlb,
				    void *table, int shift)
{
	pgtable_free(table, shift);
}
#endif

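/*
 * PTE pages take the page-table destructor here, then go through the
 * common deferred-free path with index_size 0 (a full page).
 */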
static inline void __pte_free_tlb(struct mmu_gather *tlb, pgtable_t table,
				  unsigned long address)
{
	tlb_flush_pgtable(tlb, address);
	pgtable_page_dtor(table);
	pgtable_free_tlb(tlb, page_address(table), 0);
}
#endif /* _ASM_POWERPC_PGALLOC_32_H */