blob: 812a1d8f35cbc58f21d9096b3ef00c8678d930f9 [file] [log] [blame]
David Gibsonf88df142007-04-30 16:30:56 +10001#ifndef _ASM_POWERPC_PGALLOC_64_H
2#define _ASM_POWERPC_PGALLOC_64_H
3/*
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 */
9
10#include <linux/mm.h>
11#include <linux/slab.h>
12#include <linux/cpumask.h>
13#include <linux/percpu.h>
14
#ifndef CONFIG_PPC_SUBPAGE_PROT
/*
 * Stub for kernels built without subpage protection: pgd_free() calls
 * subpage_prot_free() unconditionally, so provide a no-op here.
 */
static inline void subpage_prot_free(pgd_t *pgd) {}
#endif
18
/*
 * kmem caches that back the page-table allocators below (defined in arch
 * pgtable setup code), indexed by the *_CACHE_NUM constants.
 */
extern struct kmem_cache *pgtable_cache[];

#define PGD_CACHE_NUM 0
#define PUD_CACHE_NUM 1 /* PUDs and PMDs share cache index 1 */
#define PMD_CACHE_NUM 1
#define HUGEPTE_CACHE_NUM 2
#define PTE_NONCACHE_NUM 7 /* from GFP rather than kmem_cache */
David Gibsonf88df142007-04-30 16:30:56 +100026
27static inline pgd_t *pgd_alloc(struct mm_struct *mm)
28{
29 return kmem_cache_alloc(pgtable_cache[PGD_CACHE_NUM], GFP_KERNEL);
30}
31
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	/* Release the subpage-protection state tied to this pgd first
	 * (no-op when CONFIG_PPC_SUBPAGE_PROT is unset), then the PGD
	 * itself — order matters, the pgd must still be valid. */
	subpage_prot_free(pgd);
	kmem_cache_free(pgtable_cache[PGD_CACHE_NUM], pgd);
}
37
#ifndef CONFIG_PPC_64K_PAGES

/* 4K pages: there is a real PUD level; point the PGD entry at it. */
#define pgd_populate(MM, PGD, PUD) pgd_set(PGD, PUD)
41
42static inline pud_t *pud_alloc_one(struct mm_struct *mm, unsigned long addr)
43{
44 return kmem_cache_alloc(pgtable_cache[PUD_CACHE_NUM],
45 GFP_KERNEL|__GFP_REPEAT);
46}
47
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -080048static inline void pud_free(struct mm_struct *mm, pud_t *pud)
David Gibsonf88df142007-04-30 16:30:56 +100049{
50 kmem_cache_free(pgtable_cache[PUD_CACHE_NUM], pud);
51}
52
53static inline void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
54{
55 pud_set(pud, (unsigned long)pmd);
56}
57
/* User PTE pages are tracked as struct page; install via the page's
 * kernel address. */
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_populate_kernel(mm, pmd, pte) pmd_set(pmd, (unsigned long)(pte))
#define pmd_pgtable(pmd) pmd_page(pmd)
David Gibsonf88df142007-04-30 16:30:56 +100062
63
#else /* CONFIG_PPC_64K_PAGES */

/* 64K pages: the PUD entry is set directly to the PMD table's address. */
#define pud_populate(mm, pud, pmd) pud_set(pud, (unsigned long)pmd)
67
68static inline void pmd_populate_kernel(struct mm_struct *mm, pmd_t *pmd,
69 pte_t *pte)
70{
71 pmd_set(pmd, (unsigned long)pte);
72}
73
/* User PTE pages are tracked as struct page; install via the page's
 * kernel address. */
#define pmd_populate(mm, pmd, pte_page) \
	pmd_populate_kernel(mm, pmd, page_address(pte_page))
#define pmd_pgtable(pmd) pmd_page(pmd)

#endif /* CONFIG_PPC_64K_PAGES */
79
80static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
81{
82 return kmem_cache_alloc(pgtable_cache[PMD_CACHE_NUM],
83 GFP_KERNEL|__GFP_REPEAT);
84}
85
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -080086static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
David Gibsonf88df142007-04-30 16:30:56 +100087{
88 kmem_cache_free(pgtable_cache[PMD_CACHE_NUM], pmd);
89}
90
91static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
92 unsigned long address)
93{
Hugh Dickins517e2262007-05-09 14:38:48 +100094 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
David Gibsonf88df142007-04-30 16:30:56 +100095}
96
Martin Schwidefsky2f569af2008-02-08 04:22:04 -080097static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
98 unsigned long address)
David Gibsonf88df142007-04-30 16:30:56 +100099{
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800100 struct page *page;
101 pte_t *pte;
102
103 pte = pte_alloc_one_kernel(mm, address);
104 if (!pte)
105 return NULL;
106 page = virt_to_page(pte);
107 pgtable_page_ctor(page);
108 return page;
David Gibsonf88df142007-04-30 16:30:56 +1000109}
110
Benjamin Herrenschmidt5e541972008-02-04 22:29:14 -0800111static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
David Gibsonf88df142007-04-30 16:30:56 +1000112{
Hugh Dickins517e2262007-05-09 14:38:48 +1000113 free_page((unsigned long)pte);
David Gibsonf88df142007-04-30 16:30:56 +1000114}
115
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	/* Run the pgtable destructor before the page goes back to the
	 * allocator — order is mandatory. */
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
121
/*
 * Deferred page-table freeing: the cache index is packed into the low
 * three bits of the table's address (assumes table addresses are aligned
 * so those bits are zero — see pgtable_free_cache()).
 */
#define PGF_CACHENUM_MASK 0x7

/* A table address with its cache index packed into the low bits. */
typedef struct pgtable_free {
	unsigned long val;
} pgtable_free_t;
127
128static inline pgtable_free_t pgtable_free_cache(void *p, int cachenum,
129 unsigned long mask)
130{
131 BUG_ON(cachenum > PGF_CACHENUM_MASK);
132
133 return (pgtable_free_t){.val = ((unsigned long) p & ~mask) | cachenum};
134}
135
136static inline void pgtable_free(pgtable_free_t pgf)
137{
138 void *p = (void *)(pgf.val & ~PGF_CACHENUM_MASK);
139 int cachenum = pgf.val & PGF_CACHENUM_MASK;
140
Hugh Dickins517e2262007-05-09 14:38:48 +1000141 if (cachenum == PTE_NONCACHE_NUM)
142 free_page((unsigned long)p);
143 else
144 kmem_cache_free(pgtable_cache[cachenum], p);
David Gibsonf88df142007-04-30 16:30:56 +1000145}
146
/* Queue a packed table for freeing once the TLB has been flushed. */
extern void pgtable_free_tlb(struct mmu_gather *tlb, pgtable_free_t pgf);

/* PTE pages need the pgtable destructor run before being queued. */
#define __pte_free_tlb(tlb,ptepage)	\
do { \
	pgtable_page_dtor(ptepage); \
	pgtable_free_tlb(tlb, pgtable_free_cache(page_address(ptepage), \
		PTE_NONCACHE_NUM, PTE_TABLE_SIZE-1)); \
} while (0)
#define __pmd_free_tlb(tlb, pmd) \
	pgtable_free_tlb(tlb, pgtable_free_cache(pmd, \
		PMD_CACHE_NUM, PMD_TABLE_SIZE-1))
#ifndef CONFIG_PPC_64K_PAGES
#define __pud_free_tlb(tlb, pud) \
	pgtable_free_tlb(tlb, pgtable_free_cache(pud, \
		PUD_CACHE_NUM, PUD_TABLE_SIZE-1))
#endif /* CONFIG_PPC_64K_PAGES */

/* No page-table cache trimming needed on this arch. */
#define check_pgt_cache() do { } while (0)
165
166#endif /* _ASM_POWERPC_PGALLOC_64_H */