/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/config.h>
#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

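/*
 * Dump a summary of physical memory usage to the kernel log: total pages,
 * highmem pages, reserved pages, shared pages and swap-cache pages,
 * walking every page of every node.
 */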
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;

	printk("Mem-info:\n");
	show_free_areas();
	printk("Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_pgdat(pgdat) {
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
	}
	printk("%d pages of RAM\n", total);
	printk("%d pages of HIGHMEM\n", highmem);
	printk("%d reserved pages\n", reserved);
	printk("%d pages shared\n", shared);
	printk("%d pages swap cached\n", cached);
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	/* <pfn,flags> stored as-is, to permit clearing entries */
	set_pte(pte, pfn_pte(pfn, flags));

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk("set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk("set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk("set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

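/*
 * Install a fixmap entry: map the fixed virtual address for 'idx' to the
 * physical address 'phys' with the given protection, via set_pte_pfn().
 */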
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

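/* Allocate a zeroed page to serve as a kernel page table. */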
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

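/*
 * Allocate a zeroed page-table page for a user mapping; with CONFIG_HIGHPTE
 * the page may come from highmem.
 */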
struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}

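/* Slab constructor: a freshly allocated pmd starts out with all entries clear. */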
void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

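/*
 * The pgd list is threaded through the pgds' struct pages: page->index holds
 * the next entry and page->private holds the address of the pointer that
 * refers to this entry (pgd_list itself for the head element).
 */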
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		pgd_list->private = (unsigned long)&page->index;
	pgd_list = page;
	page->private = (unsigned long)&pgd_list;
}

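/* Unlink a pgd's page from pgd_list using the back-pointer in page->private. */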
static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page->private;
	*pprev = next;
	if (next)
		next->private = (unsigned long)pprev;
}

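/*
 * Slab constructor for pgds: copy the kernel mappings from swapper_pg_dir.
 * In the non-PAE case the user range is also cleared and the pgd is added
 * to pgd_list under pgd_lock, so pageattr.c can find and update it later.
 */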
void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags;

	if (PTRS_PER_PMD == 1)
		spin_lock_irqsave(&pgd_lock, flags);

	memcpy((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));

	if (PTRS_PER_PMD > 1)
		return;

	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
	unsigned long flags; /* can be called from interrupt context */

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

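/*
 * Allocate a pgd from pgd_cache.  In the PAE case the user pmds are also
 * allocated up front and plugged into the pgd, unwinding on failure.
 */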
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
		if (!pmd)
			goto out_oom;
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	return pgd;

out_oom:
	for (i--; i >= 0; i--)
		kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	kmem_cache_free(pgd_cache, pgd);
	return NULL;
}

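/* Free a pgd; in the PAE case the preallocated user pmds are released first. */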
void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < USER_PTRS_PER_PGD; ++i)
			kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	kmem_cache_free(pgd_cache, pgd);
}