/*
 *  linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

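/*
 * Dump a per-node summary of memory usage to the kernel log: walks
 * every online pgdat under its resize lock and classifies each page
 * as highmem, reserved, swap-cached or shared.
 */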
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			/* Pet the NMI watchdog: this loop can take a while on big machines. */
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
					global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
					global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

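/*
 * Fixmap bookkeeping: 'fixmaps' counts entries installed so far, and
 * __FIXADDR_TOP marks the top of the fixmap area.  It is a variable
 * (and exported) so that reserve_top_address() can lower it before
 * any fixmap entry has been set.
 */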
static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

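/*
 * Install a single fixmap entry: translate the fixmap index to its
 * virtual address and map it to the given physical address.
 */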
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}

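/*
 * Allocate a zeroed page to use as a page table.  The kernel variant
 * must be directly addressable; user page tables may live in highmem
 * when CONFIG_HIGHPTE is enabled.
 */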
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	return pte;
}

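/* Constructor for the pmd cache: a freshly allocated pmd starts out cleared. */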
void pmd_ctor(struct kmem_cache *cache, void *pmd)
{
	memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

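/*
 * The pgd list is threaded through struct page itself, under pgd_lock:
 * page->index holds the next page in the list, and page_private() holds
 * a back-pointer to whichever field (a predecessor's ->index, or
 * pgd_list itself) points at this page, so unlinking needs no search.
 */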
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);
	page->index = (unsigned long)pgd_list;
	if (pgd_list)
		set_page_private(pgd_list, (unsigned long)&page->index);
	pgd_list = page;
	set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *next, **pprev, *page = virt_to_page(pgd);
	next = (struct page *)page->index;
	pprev = (struct page **)page_private(page);
	*pprev = next;
	if (next)
		set_page_private(next, (unsigned long)pprev);
}

#if (PTRS_PER_PMD == 1)
/* Non-PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	unsigned long flags;

	/* !PAE, no pagetable sharing */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* must happen under lock */
	clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
			swapper_pg_dir + USER_PTRS_PER_PGD,
			KERNEL_PGD_PTRS);
	paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
				__pa(swapper_pg_dir) >> PAGE_SHIFT,
				USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	pgd_list_add(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}
#else  /* PTRS_PER_PMD > 1 */
/* PAE pgd constructor */
static void pgd_ctor(void *pgd)
{
	/* PAE, kernel PMD may be shared */

	if (SHARED_KERNEL_PMD) {
		clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
	} else {
		unsigned long flags;

		memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
		spin_lock_irqsave(&pgd_lock, flags);
		pgd_list_add(pgd);
		spin_unlock_irqrestore(&pgd_lock, flags);
	}
}
#endif	/* PTRS_PER_PMD */

static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	paravirt_release_pd(__pa(pgd) >> PAGE_SHIFT);
	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

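/*
 * Number of pgd entries that are private to this pagetable rather than
 * shared with swapper_pg_dir: all of them when the kernel pmd is not
 * shared, only the user portion when it is.
 */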
#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(pgd);
			return 0;
		}

		/* Kernel pmds must carry the kernel's own mappings. */
		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(pgd_t *pgd)
{
}
#endif	/* CONFIG_X86_PAE */

/* If we allocate a pmd for part of the kernel address space, then
   make sure it's initialized with the appropriate kernel mappings.
   Otherwise use a cached zeroed pmd.  */
static pmd_t *pmd_cache_alloc(int idx)
{
	pmd_t *pmd;

	if (idx >= USER_PTRS_PER_PGD) {
		pmd = (pmd_t *)__get_free_page(GFP_KERNEL);

		if (pmd)
			memcpy(pmd,
			       (void *)pgd_page_vaddr(swapper_pg_dir[idx]),
			       sizeof(pmd_t) * PTRS_PER_PMD);
	} else
		pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);

	return pmd;
}

static void pmd_cache_free(pmd_t *pmd, int idx)
{
	if (idx >= USER_PTRS_PER_PGD)
		free_page((unsigned long)pmd);
	else
		kmem_cache_free(pmd_cache, pmd);
}

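/*
 * Allocate a pgd from the quicklist (pgd_ctor has already cloned the
 * kernel mappings into it).  In the PAE case, also allocate and install
 * the per-pagetable pmds; on failure, everything allocated so far is
 * torn down again.
 */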
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	int i;
	pgd_t *pgd = quicklist_alloc(0, GFP_KERNEL, pgd_ctor);

	if (PTRS_PER_PMD == 1 || !pgd)
		return pgd;

	mm->pgd = pgd;			/* so that alloc_pd can use it */

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
		pmd_t *pmd = pmd_cache_alloc(i);

		if (!pmd)
			goto out_oom;

		paravirt_alloc_pd(mm, __pa(pmd) >> PAGE_SHIFT);
		/* "+ 1" sets the Present bit of the pgd entry */
		set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
	}
	if (!pgd_prepopulate_pmd(mm, pgd)) {
		quicklist_free(0, pgd_dtor, pgd);
		pgd = NULL;
	}

	return pgd;

out_oom:
	for (i--; i >= 0; i--) {
		pgd_t pgdent = pgd[i];
		void *pmd = (void *)__va(pgd_val(pgdent)-1);
		paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
		pmd_cache_free(pmd, i);
	}
	quicklist_free(0, pgd_dtor, pgd);
	return NULL;
}

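/*
 * Free a pgd: release the per-pagetable pmds first (PAE), mop up any
 * preallocated pmds that never got a vma, then return the pgd itself
 * to the quicklist.
 */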
void pgd_free(pgd_t *pgd)
{
	int i;

	/* in the PAE case user pgd entries are overwritten before usage */
	if (PTRS_PER_PMD > 1)
		for (i = 0; i < UNSHARED_PTRS_PER_PGD; ++i) {
			pgd_t pgdent = pgd[i];
			void *pmd = (void *)__va(pgd_val(pgdent)-1);
			paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
			pmd_cache_free(pmd, i);
		}
	/* in the non-PAE case, free_pgtables() clears user pgd entries */
	pgd_mop_up_pmds(pgd);
	quicklist_free(0, pgd_dtor, pgd);
}

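/*
 * Periodically trim the pgd quicklist back toward a small cache
 * (here: keep about 25 pages, freeing at most 16 per call).
 */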
void check_pgt_cache(void)
{
	quicklist_trim(0, pgd_dtor, 25, 16);
}