blob: cb4867de06a2f805b25786909565ddb8e5efe997 [file] [log] [blame]
Sam Ravnborgf5e706a2008-07-17 21:55:51 -07001#ifndef _SPARC64_PGALLOC_H
2#define _SPARC64_PGALLOC_H
3
4#include <linux/kernel.h>
5#include <linux/sched.h>
6#include <linux/mm.h>
7#include <linux/slab.h>
Sam Ravnborgf5e706a2008-07-17 21:55:51 -07008
9#include <asm/spitfire.h>
10#include <asm/cpudata.h>
11#include <asm/cacheflush.h>
12#include <asm/page.h>
13
14/* Page table allocation/freeing. */
15
/* Single kmem cache backing both PGD and PMD table allocations below. */
extern struct kmem_cache *pgtable_cache;

Sam Ravnborgf5e706a2008-07-17 21:55:51 -070018static inline pgd_t *pgd_alloc(struct mm_struct *mm)
19{
David S. Miller4dedbf82011-07-25 17:12:20 -070020 return kmem_cache_alloc(pgtable_cache, GFP_KERNEL);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070021}
22
/* Release a page directory previously obtained from pgd_alloc(). */
static inline void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgtable_cache, pgd);
}
27
/* Install a PMD table into the given PUD entry. */
#define pud_populate(MM, PUD, PMD) pud_set(PUD, PMD)

30static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long addr)
31{
David S. Miller4dedbf82011-07-25 17:12:20 -070032 return kmem_cache_alloc(pgtable_cache,
33 GFP_KERNEL|__GFP_REPEAT);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070034}
35
/* Release a PMD table previously obtained from pmd_alloc_one(). */
static inline void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pgtable_cache, pmd);
}
40
41static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
42 unsigned long address)
43{
David S. Miller4dedbf82011-07-25 17:12:20 -070044 return (pte_t *)__get_free_page(GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070045}
46
47static inline pgtable_t pte_alloc_one(struct mm_struct *mm,
48 unsigned long address)
49{
50 struct page *page;
David S. Miller4dedbf82011-07-25 17:12:20 -070051 pte_t *pte;
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070052
David S. Miller4dedbf82011-07-25 17:12:20 -070053 pte = pte_alloc_one_kernel(mm, address);
54 if (!pte)
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070055 return NULL;
David S. Miller4dedbf82011-07-25 17:12:20 -070056 page = virt_to_page(pte);
Sam Ravnborgf5e706a2008-07-17 21:55:51 -070057 pgtable_page_ctor(page);
58 return page;
59}
60
/* Release a kernel PTE table obtained from pte_alloc_one_kernel(). */
static inline void pte_free_kernel(struct mm_struct *mm, pte_t *pte)
{
	free_page((unsigned long)pte);
}
65
/*
 * Release a user PTE table obtained from pte_alloc_one().  The page
 * destructor must run before the page itself is freed, mirroring the
 * ctor call made at allocation time.
 */
static inline void pte_free(struct mm_struct *mm, pgtable_t ptepage)
{
	pgtable_page_dtor(ptepage);
	__free_page(ptepage);
}
71
/*
 * Install a PTE table into a PMD entry.  pmd_populate_kernel() takes the
 * table's kernel virtual address; pmd_populate() takes its struct page
 * and converts via page_address().
 */
#define pmd_populate_kernel(MM, PMD, PTE) pmd_set(PMD, PTE)
#define pmd_populate(MM,PMD,PTE_PAGE) \
	pmd_populate_kernel(MM,PMD,page_address(PTE_PAGE))
/* Recover the pgtable_t (struct page) backing a PMD entry. */
#define pmd_pgtable(pmd) pmd_page(pmd)
76
/* No per-cpu page-table quicklists here, so there is nothing to trim. */
#define check_pgt_cache() do { } while (0)

/* TLB-gather hooks: tables are freed immediately via the normal paths. */
#define __pte_free_tlb(tlb, pte, addr) pte_free((tlb)->mm, pte)
#define __pmd_free_tlb(tlb, pmd, addr) pmd_free((tlb)->mm, pmd)

Sam Ravnborgf5e706a2008-07-17 21:55:51 -070082#endif /* _SPARC64_PGALLOC_H */