#include <linux/mm.h>
#include <linux/slab.h>

#define PGALLOC_GFP (GFP_KERNEL | __GFP_REPEAT | __GFP_ZERO)

static struct kmem_cache *pgd_cachep;
#if PAGETABLE_LEVELS > 2
static struct kmem_cache *pmd_cachep;
#endif

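/*
 * Constructor run by the slab allocator whenever a new pgd object is
 * set up, so freshly allocated page tables start out sharing the
 * kernel mappings of swapper_pg_dir.
 */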
void pgd_ctor(void *x)
{
	pgd_t *pgd = x;

	/*
	 * Give the user half a clean slate and copy in the kernel
	 * half; zeroing here rather than with __GFP_ZERO at
	 * allocation time keeps the zeroing from clobbering the
	 * kernel entries on every allocation.
	 */
	memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
	memcpy(pgd + USER_PTRS_PER_PGD,
	       swapper_pg_dir + USER_PTRS_PER_PGD,
	       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
}

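/*
 * Create the kmem caches backing pgd (and, with three-level page
 * tables, pmd) allocations; SLAB_PANIC turns a failure to create
 * either cache into an immediate boot-time panic.
 */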
void pgtable_cache_init(void)
{
	pgd_cachep = kmem_cache_create("pgd_cache",
				       PTRS_PER_PGD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, pgd_ctor);
#if PAGETABLE_LEVELS > 2
	pmd_cachep = kmem_cache_create("pmd_cache",
				       PTRS_PER_PMD * (1 << PTE_MAGNITUDE),
				       PAGE_SIZE, SLAB_PANIC, NULL);
#endif
}

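/*
 * Plain GFP_KERNEL on purpose: the cache constructor has already
 * initialised the object, and __GFP_ZERO would simply re-zero it on
 * every allocation and throw that work away.
 */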
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	return kmem_cache_alloc(pgd_cachep, GFP_KERNEL);
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	kmem_cache_free(pgd_cachep, pgd);
}

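/*
 * Second-level handling, present only with three-level page tables.
 * The pud level is folded into the pgd on sh, so pud_populate()
 * just points the folded pud entry at a pmd table.
 */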
#if PAGETABLE_LEVELS > 2
void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
{
	set_pud(pud, __pud((unsigned long)pmd));
}

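/*
 * pmds have no constructor; the __GFP_ZERO in PGALLOC_GFP is what
 * hands back a cleared table here.
 */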
pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
}

void pmd_free(struct mm_struct *mm, pmd_t *pmd)
{
	kmem_cache_free(pmd_cachep, pmd);
}
#endif /* PAGETABLE_LEVELS > 2 */