blob: 5c8f9247c3c2d2c37b0785b3cf850da4b91b4a4b [file] [log] [blame]
Greg Kroah-Hartmanb2441312017-11-01 15:07:57 +01001// SPDX-License-Identifier: GPL-2.0
Matt Fleming2a5eacc2009-12-31 12:19:24 +00002#include <linux/mm.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +09003#include <linux/slab.h>
Matt Fleming2a5eacc2009-12-31 12:19:24 +00004
/* GFP flags for page-table allocations: zeroed, may sleep. Parenthesized so
 * the expansion stays atomic inside any surrounding expression. */
#define PGALLOC_GFP (GFP_KERNEL | __GFP_ZERO)
Matt Fleming2a5eacc2009-12-31 12:19:24 +00006
7static struct kmem_cache *pgd_cachep;
Paul Mundt782bb5a2010-01-13 19:11:14 +09008#if PAGETABLE_LEVELS > 2
Matt Fleming2a5eacc2009-12-31 12:19:24 +00009static struct kmem_cache *pmd_cachep;
10#endif
11
12void pgd_ctor(void *x)
13{
14 pgd_t *pgd = x;
15
16 memcpy(pgd + USER_PTRS_PER_PGD,
17 swapper_pg_dir + USER_PTRS_PER_PGD,
18 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
19}
20
21void pgtable_cache_init(void)
22{
23 pgd_cachep = kmem_cache_create("pgd_cache",
24 PTRS_PER_PGD * (1<<PTE_MAGNITUDE),
25 PAGE_SIZE, SLAB_PANIC, pgd_ctor);
Paul Mundt782bb5a2010-01-13 19:11:14 +090026#if PAGETABLE_LEVELS > 2
Matt Fleming2a5eacc2009-12-31 12:19:24 +000027 pmd_cachep = kmem_cache_create("pmd_cache",
28 PTRS_PER_PMD * (1<<PTE_MAGNITUDE),
29 PAGE_SIZE, SLAB_PANIC, NULL);
30#endif
31}
32
33pgd_t *pgd_alloc(struct mm_struct *mm)
34{
35 return kmem_cache_alloc(pgd_cachep, PGALLOC_GFP);
36}
37
38void pgd_free(struct mm_struct *mm, pgd_t *pgd)
39{
40 kmem_cache_free(pgd_cachep, pgd);
41}
42
Paul Mundt782bb5a2010-01-13 19:11:14 +090043#if PAGETABLE_LEVELS > 2
Matt Fleming2a5eacc2009-12-31 12:19:24 +000044void pud_populate(struct mm_struct *mm, pud_t *pud, pmd_t *pmd)
45{
46 set_pud(pud, __pud((unsigned long)pmd));
47}
48
49pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
50{
51 return kmem_cache_alloc(pmd_cachep, PGALLOC_GFP);
52}
53
54void pmd_free(struct mm_struct *mm, pmd_t *pmd)
55{
56 kmem_cache_free(pmd_cachep, pmd);
57}
Paul Mundt782bb5a2010-01-13 19:11:14 +090058#endif /* PAGETABLE_LEVELS > 2 */