#ifndef __ASM_SH64_PGALLOC_H
#define __ASM_SH64_PGALLOC_H

/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * include/asm-sh64/pgalloc.h
 *
 * Copyright (C) 2000, 2001  Paolo Alberelli
 * Copyright (C) 2003, 2004  Paul Mundt
 * Copyright (C) 2003, 2004  Richard Curnow
 *
 */

#include <linux/threads.h>
#include <linux/mm.h>

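/*
 * CPU-local quicklists: freed page-table pages are kept on simple LIFO
 * lists instead of being returned to the page allocator straight away.
 * The list link is threaded through the first word of each cached page
 * (see free_pgd_fast()/free_pte_fast() below), and pgtable_cache_size
 * counts how many pages are currently cached.
 */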
#define pgd_quicklist (current_cpu_data.pgd_quick)
#define pmd_quicklist (current_cpu_data.pmd_quick)
#define pte_quicklist (current_cpu_data.pte_quick)
#define pgtable_cache_size (current_cpu_data.pgtable_cache_sz)

/*
 * Initialise the user part of a fresh pgd: every user slot starts out
 * pointing at the empty_bad_pte_table placeholder rather than being
 * left uninitialised.
 */
static inline void pgd_init(unsigned long page)
{
        unsigned long *pgd = (unsigned long *)page;
        extern pte_t empty_bad_pte_table[PTRS_PER_PTE];
        int i;

        for (i = 0; i < USER_PTRS_PER_PGD; i++)
                pgd[i] = (unsigned long)empty_bad_pte_table;
}

/*
 * Allocate and free page tables. The xxx_kernel() versions are
 * used to allocate page tables for kernel mappings; they turn on
 * ASN bits, if the CPU has any.
 */

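/*
 * The allocators below come in a "fast" flavour, which tries the
 * quicklist first, and a "slow" flavour, which falls back to the
 * normal allocator.  An illustrative (not definitive) caller:
 *
 *	pgd_t *pgd = get_pgd_fast();	(quicklist hit, or slow path)
 *	...
 *	free_pgd_fast(pgd);		(push back onto the quicklist)
 */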
static inline pgd_t *get_pgd_slow(void)
{
        /* Only the user part of the pgd is allocated. */
        unsigned int pgd_size = (USER_PTRS_PER_PGD * sizeof(pgd_t));
        pgd_t *ret = (pgd_t *)kmalloc(pgd_size, GFP_KERNEL);
        return ret;
}

static inline pgd_t *get_pgd_fast(void)
{
        unsigned long *ret;

        if ((ret = pgd_quicklist) != NULL) {
                /*
                 * Pop the head of the quicklist; its first word holds
                 * the link to the next cached page.
                 */
                pgd_quicklist = (unsigned long *)(*ret);
                ret[0] = 0;
                pgtable_cache_size--;
        } else
                ret = (unsigned long *)get_pgd_slow();

        if (ret) {
                memset(ret, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
        }
        return (pgd_t *)ret;
}

static inline void free_pgd_fast(pgd_t *pgd)
{
        /* Thread the freed pgd onto the quicklist via its first word. */
        *(unsigned long *)pgd = (unsigned long) pgd_quicklist;
        pgd_quicklist = (unsigned long *) pgd;
        pgtable_cache_size++;
}

static inline void free_pgd_slow(pgd_t *pgd)
{
        kfree(pgd);
}

extern pte_t *get_pte_slow(pmd_t *pmd, unsigned long address_preadjusted);
extern pte_t *get_pte_kernel_slow(pmd_t *pmd, unsigned long address_preadjusted);

static inline pte_t *get_pte_fast(void)
{
        unsigned long *ret;

        if ((ret = (unsigned long *)pte_quicklist) != NULL) {
                pte_quicklist = (unsigned long *)(*ret);
                /*
                 * The first word was reused as the quicklist link;
                 * restore it from the second entry, which should
                 * still be clear on a freed pte page.
                 */
                ret[0] = ret[1];
                pgtable_cache_size--;
        }
        return (pte_t *)ret;
}

static inline void free_pte_fast(pte_t *pte)
{
        /* Thread the freed pte page onto the quicklist via its first word. */
        *(unsigned long *)pte = (unsigned long) pte_quicklist;
        pte_quicklist = (unsigned long *) pte;
        pgtable_cache_size++;
}

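/*
 * Note that the pte_free_kernel()/pte_free() paths below hand the page
 * straight back to the page allocator and bypass the quicklist cache.
 */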
static inline void pte_free_kernel(pte_t *pte)
{
        free_page((unsigned long)pte);
}

static inline void pte_free(struct page *pte)
{
        __free_page(pte);
}

static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
                                          unsigned long address)
{
        pte_t *pte;

        pte = (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);

        return pte;
}

static inline struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

        pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO, 0);

        return pte;
}

#define __pte_free_tlb(tlb,pte) tlb_remove_page((tlb),(pte))

/*
 * Under a two-level layout, allocating and freeing a pmd is trivial:
 * the 1-entry pmd is folded inside the pgd, so it has no extra memory
 * associated with it.  Under a three-level layout the pmd is a real
 * page of its own.
 */

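/*
 * Illustrative lookup chains for the two configurations:
 *
 *	2-level: pgd entry -------------------> pte page -> data page
 *	3-level: pgd entry -> pmd page -------> pte page -> data page
 */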
#if defined(CONFIG_SH64_PGTABLE_2_LEVEL)

/* The pmd level is folded away: any attempt to allocate one is a bug. */
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()
/* __pte_free_tlb() is already defined above for both layouts. */
#define __pmd_free_tlb(tlb,pmd) do { } while (0)

#elif defined(CONFIG_SH64_PGTABLE_3_LEVEL)

static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
        pmd_t *pmd;
        pmd = (pmd_t *) __get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
        return pmd;
}

static inline void pmd_free(pmd_t *pmd)
{
        free_page((unsigned long) pmd);
}

#define pgd_populate(mm, pgd, pmd) pgd_set(pgd, pmd)
#define __pmd_free_tlb(tlb,pmd) pmd_free(pmd)

#else
#error "No defined page table size"
#endif

#define check_pgt_cache() do { } while (0)
#define pgd_free(pgd) free_pgd_slow(pgd)
#define pgd_alloc(mm) get_pgd_fast()

extern int do_check_pgt_cache(int, int);

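/*
 * When a kernel pgd entry is created after boot (e.g. by vmalloc), it
 * has to be copied into the pgd of every existing mm, and into any
 * pgds still cached on the quicklist, since those were populated
 * before the new entry existed.
 */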
static inline void set_pgdir(unsigned long address, pgd_t entry)
{
        struct task_struct * p;
        pgd_t *pgd;

        read_lock(&tasklist_lock);
        for_each_process(p) {
                if (!p->mm)
                        continue;
                *pgd_offset(p->mm, address) = entry;
        }
        read_unlock(&tasklist_lock);
        /* Also fix up any pgds sitting on the quicklist. */
        for (pgd = (pgd_t *)pgd_quicklist; pgd; pgd = (pgd_t *)*(unsigned long *)pgd)
                pgd[address >> PGDIR_SHIFT] = entry;
}

#define pmd_populate_kernel(mm, pmd, pte) \
        set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) (pte)))

static inline void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
                                struct page *pte)
{
        set_pmd(pmd, __pmd(_PAGE_TABLE + (unsigned long) page_address(pte)));
}

#endif /* __ASM_SH64_PGALLOC_H */