/* pgalloc.c: page directory & page table allocation
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/highmem.h>
#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/cacheflush.h>

pgd_t swapper_pg_dir[PTRS_PER_PGD] __attribute__((aligned(PAGE_SIZE)));
kmem_cache_t *pgd_cache;

pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	pte_t *pte = (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT);
	if (pte)
		clear_page(pte);
	return pte;
}
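
/*
 * A sketch of how the generic code consumes the allocator above (this
 * is background from the era's mm/memory.c callers, not code in this
 * file): the caller rechecks the pmd under mm->page_table_lock before
 * installing the new table, roughly:
 *
 *	pte_t *new = pte_alloc_one_kernel(mm, address);
 *	if (new && !pmd_present(*pmd))
 *		pmd_populate_kernel(mm, pmd, new);
 */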

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *page;

#ifdef CONFIG_HIGHPTE
	page = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT, 0);
#else
	page = alloc_pages(GFP_KERNEL|__GFP_REPEAT, 0);
#endif
	/* only clear and flush the page if the allocation succeeded;
	 * calling flush_dcache_page() on a NULL page would oops */
	if (page) {
		clear_highpage(page);
		flush_dcache_page(page);
	}
	return page;
}
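
/*
 * Background note on the CONFIG_HIGHPTE branch above: a page table
 * placed in highmem has no permanent kernel mapping, so generic code
 * reaches it through pte_offset_map()/pte_unmap(), which kmap_atomic()
 * the pte page for the duration of the access.
 */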
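/*
 * Set or clear a PMD entry.  On FRV the "entry" is really a whole block
 * of shadow entries (PME_SIZE bytes' worth): a non-zero pmd value is
 * replicated into every 32-bit slot, each successive copy stepped on by
 * __frv_PT_SIZE so that each slot points at the next chunk of the
 * underlying page table.
 */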
void __set_pmd(pmd_t *pmdptr, unsigned long pmd)
{
	unsigned long *__ste_p = pmdptr->ste;
	int loop;

	if (!pmd) {
		/* clear the whole block of shadow entries */
		memset(__ste_p, 0, PME_SIZE);
	} else {
		/* a valid PME must not carry any of these control or
		 * reserved bits */
		BUG_ON(pmd & (0x3f00 | xAMPRx_SS | 0xe));

		/* replicate the entry, stepping each copy on to the next
		 * chunk of the page table */
		for (loop = PME_SIZE; loop > 0; loop -= 4) {
			*__ste_p++ = pmd;
			pmd += __frv_PT_SIZE;
		}
	}

	/* write the updated block back to memory for the MMU to see */
	frv_dcache_writeback((unsigned long) pmdptr, (unsigned long) (pmdptr + 1));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * If the locking proves to be non-performant, a ticketing scheme with
 * checks at dup_mmap(), exec(), and other mmlist addition points
 * could be used. The locking scheme was chosen on the basis of
 * manfred's recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	/* the list is threaded through the pgd's struct page: ->index is
	 * the next pointer, and ->private points back at the location
	 * that points at this page */
	page->index = (unsigned long) pgd_list;
	if (pgd_list)
		pgd_list->private = (unsigned long) &page->index;
	pgd_list = page;
	page->private = (unsigned long) &pgd_list;
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);

	/* unlink by making the previous element's next pointer skip us */
	next = (struct page *) page->index;
	pprev = (struct page **) page->private;
	*pprev = next;
	if (next)
		next->private = (unsigned long) pprev;
}
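
/*
 * A sketch of how a walker would traverse this list (modelled on the
 * i386 pageattr.c code the scheme above was lifted from; not code in
 * this file): take pgd_lock, then follow ->index as the next pointer:
 *
 *	struct page *page;
 *	unsigned long flags;
 *
 *	spin_lock_irqsave(&pgd_lock, flags);
 *	for (page = pgd_list; page; page = (struct page *) page->index) {
 *		pgd_t *pgd = (pgd_t *) page_address(page);
 *		... fix up the kernel mapping in this pgd ...
 *	}
 *	spin_unlock_irqrestore(&pgd_lock, flags);
 */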

void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	/* give the new pgd a copy of the kernel's mappings */
	memcpy((pgd_t *) pgd + USER_PGDS_IN_LAST_PML4,
	       swapper_pg_dir + USER_PGDS_IN_LAST_PML4,
	       (PTRS_PER_PGD - USER_PGDS_IN_LAST_PML4) * sizeof(pgd_t));

	/* the lock is only taken in the folded-pmd case above, so this
	 * early return does not leak it */
	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PGDS_IN_LAST_PML4 * sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

pgd_t *pgd_alloc(struct mm_struct *mm)
{
	/* the slab constructor has already set up the kernel mappings */
	return kmem_cache_alloc(pgd_cache, GFP_KERNEL);
}
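
/*
 * Why pgd_alloc() needs no explicit initialization (2.6-era slab
 * semantics, stated as background rather than anything this file
 * enforces): the slab runs pgd_ctor() when it constructs an object and
 * keeps objects in their constructed state across free/re-alloc cycles,
 * so the kernel mappings are already in place.  A typical caller is
 * mm_init() in kernel/fork.c, roughly:
 *
 *	mm->pgd = pgd_alloc(mm);
 *	if (!mm->pgd)
 *		goto fail;
 */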

void pgd_free(pgd_t *pgd)
{
	/* in the non-PAE case, clear_page_tables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}

void __init pgtable_cache_init(void)
{
	pgd_cache = kmem_cache_create("pgd",
				      PTRS_PER_PGD * sizeof(pgd_t),
				      PTRS_PER_PGD * sizeof(pgd_t),
				      0,
				      pgd_ctor,
				      pgd_dtor);
	if (!pgd_cache)
		panic("pgtable_cache_init(): Cannot create pgd cache");
}