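/*
 * Slab-backed allocators for pgd (and, on three-level configurations,
 * pmd) page table pages.
 */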
#include <linux/mm.h>
#include <linux/slab.h>

#define PGALLOC_GFP (GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO)

static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
#endif

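/*
 * pgd constructor: seed a freshly allocated pgd with the kernel mappings
 * by copying the kernel portion of swapper_pg_dir.
 */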
void pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

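/*
 * Create the slab caches used for pgd (and, with more than two page
 * table levels, pmd) allocations.
 */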
void pgtable_cache_init(void)
{
	pgd_cachep = kmem_cache_create("pgd_cache",
				       PTRS_PER_PGD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
#if PAGETABLE_LEVELS > 2
	pmd_cachep = kmem_cache_create("pmd_cache",
				       PTRS_PER_PMD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, NULL);
#endif
}

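/* pgds come straight out of the dedicated slab cache. */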
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgd_cachep, pgd);
}

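/*
 * Three-level configurations also back the pmd level with a slab cache;
 * a pud entry simply holds the address of its pmd page.
 */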
#if PAGETABLE_LEVELS > 2
void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}

pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
}

void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pmd_cachep, pmd);
}
#endif /* PAGETABLE_LEVELS > 2 */