blob: 6da309b6fda7faf3dd7927310753bea1da978531 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001#ifndef _ASM_M32R_PGALLOC_H
2#define _ASM_M32R_PGALLOC_H
3
4/* $Id$ */
5
6#include <linux/config.h>
7#include <linux/mm.h>
8
9#include <asm/io.h>
10
/*
 * Point *pmd at a pte table given by its kernel virtual address:
 * __pa() converts it to a physical address, _PAGE_TABLE supplies
 * the access bits for the entry.
 */
#define pmd_populate_kernel(mm, pmd, pte) \
	set_pmd(pmd, __pmd(_PAGE_TABLE + __pa(pte)))
13
/*
 * Point *pmd at a pte table held as a struct page (userspace case):
 * page_to_phys() yields the table's physical address.  'mm' is unused.
 */
static __inline__ void pmd_populate(struct mm_struct *mm, pmd_t *pmd,
	struct page *pte)
{
	set_pmd(pmd, __pmd(_PAGE_TABLE + page_to_phys(pte)));
}
19
20/*
21 * Allocate and free page tables.
22 */
23static __inline__ pgd_t *pgd_alloc(struct mm_struct *mm)
24{
25 pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
26
27 return pgd;
28}
29
/* Release the single page backing a pgd obtained from pgd_alloc(). */
static __inline__ void pgd_free(pgd_t *pgd)
{
	free_page((unsigned long)pgd);
}
34
35static __inline__ pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
36 unsigned long address)
37{
38 pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_ZERO);
39
40 return pte;
41}
42
43static __inline__ struct page *pte_alloc_one(struct mm_struct *mm,
44 unsigned long address)
45{
46 struct page *pte = alloc_page(GFP_KERNEL|__GFP_ZERO);
47
48
49 return pte;
50}
51
/* Free a pte page allocated with pte_alloc_one_kernel(). */
static __inline__ void pte_free_kernel(pte_t *pte)
{
	free_page((unsigned long)pte);
}
56
/* Free a pte page allocated with pte_alloc_one(). */
static __inline__ void pte_free(struct page *pte)
{
	__free_page(pte);
}
61
62#define __pte_free_tlb(tlb, pte) pte_free((pte))
63
64/*
65 * allocating and freeing a pmd is trivial: the 1-entry pmd is
66 * inside the pgd, so has no extra memory associated with it.
67 * (In the PAE case we free the pmds as part of the pgd.)
68 */
69
/* A standalone pmd can never exist on this two-level layout, so
 * allocating one (or populating a pgd with one) is a hard bug;
 * freeing is a no-op. */
#define pmd_alloc_one(mm, addr) ({ BUG(); ((pmd_t *)2); })
#define pmd_free(x) do { } while (0)
#define __pmd_free_tlb(tlb, x) do { } while (0)
#define pgd_populate(mm, pmd, pte) BUG()

/* No per-arch page table cache to trim. */
#define check_pgt_cache() do { } while (0)
76
77#endif /* _ASM_M32R_PGALLOC_H */
78