/*
 * linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

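/*
 * Dump a summary of memory state: walk every page on each online
 * node (poking the NMI watchdog as we go, since this can take a
 * while), count reserved, swap-cache, shared and highmem pages,
 * then print the global VM counters.
 */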
void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
        unsigned long flags;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
                                touch_nmi_watchdog();
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

        printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
        printk(KERN_INFO "%lu pages writeback\n",
               global_page_state(NR_WRITEBACK));
        printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n",
               global_page_state(NR_SLAB_RECLAIMABLE) +
               global_page_state(NR_SLAB_UNRECLAIMABLE));
        printk(KERN_INFO "%lu pages pagetables\n",
               global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        if (pgprot_val(flags))
                set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
        else
                pte_clear(&init_mm, vaddr, pte);

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

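/*
 * Install a fixmap entry: map the fixed virtual address for slot @idx
 * to @phys with the given protection flags (a zero @flags clears the
 * mapping instead).  Each call bumps the fixmaps count, which lets
 * reserve_top_address() refuse to move __FIXADDR_TOP once any fixmap
 * entry exists.
 */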
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
        fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
        BUG_ON(fixmaps > 0);
        printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
               (int)-reserve);
        __FIXADDR_TOP = -reserve - PAGE_SIZE;
        __VMALLOC_RESERVE += reserve;
}

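/*
 * Kernel pte pages must come from lowmem, since they are reached
 * through the linear mapping.  User pte pages may come from highmem
 * when CONFIG_HIGHPTE is enabled (see pte_alloc_one below); those are
 * mapped on demand with kmap_atomic rather than permanently.
 */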
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        if (pte)
                pgtable_page_ctor(pte);
        return pte;
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);

        list_del(&page->lru);
}

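/*
 * Number of pgd entries that are private to each pagetable: when the
 * kernel pmd is shared, only the usermode entries need per-pgd setup
 * and teardown; otherwise every entry does.
 */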
#define UNSHARED_PTRS_PER_PGD \
        (SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

static void pgd_ctor(void *p)
{
        pgd_t *pgd = p;
        unsigned long flags;

        /* Clear usermode parts of PGD */
        memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

        spin_lock_irqsave(&pgd_lock, flags);

        /* If the pgd points to a shared pagetable level (either the
           ptes in non-PAE, or shared PMD in PAE), then just copy the
           references from swapper_pg_dir. */
        if (PAGETABLE_LEVELS == 2 ||
            (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
                clone_pgd_range(pgd + USER_PTRS_PER_PGD,
                                swapper_pg_dir + USER_PTRS_PER_PGD,
                                KERNEL_PGD_PTRS);
                paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
                                        __pa(swapper_pg_dir) >> PAGE_SHIFT,
                                        USER_PTRS_PER_PGD,
                                        KERNEL_PGD_PTRS);
        }

        /* list required to sync kernel mapping updates */
        if (!SHARED_KERNEL_PMD)
                pgd_list_add(pgd);

        spin_unlock_irqrestore(&pgd_lock, flags);
}

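/*
 * Undo pgd_ctor: unlink the pgd from pgd_list.  Pgds with a shared
 * kernel pmd were never listed, so there is nothing to do for them.
 */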
static void pgd_dtor(void *pgd)
{
        unsigned long flags; /* can be called from interrupt context */

        if (SHARED_KERNEL_PMD)
                return;

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
        int i;

        for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
                pgd_t pgd = pgdp[i];

                if (pgd_val(pgd) != 0) {
                        pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

                        pgdp[i] = native_make_pgd(0);

                        paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
                        pmd_free(mm, pmd);
                }
        }
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        pud_t *pud;
        unsigned long addr;
        int i;

        pud = pud_offset(pgd, 0);
        for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
             i++, pud++, addr += PUD_SIZE) {
                pmd_t *pmd = pmd_alloc_one(mm, addr);

                if (!pmd) {
                        pgd_mop_up_pmds(mm, pgd);
                        return 0;
                }

                if (i >= USER_PTRS_PER_PGD)
                        memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
                               sizeof(pmd_t) * PTRS_PER_PMD);

                pud_populate(mm, pud, pmd);
        }

        return 1;
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
        return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
}
#endif  /* CONFIG_X86_PAE */

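/*
 * Allocate and initialize a fresh pgd: wire up the kernel mappings
 * via pgd_ctor() and, on PAE, pre-populate the pmds.  Returns NULL
 * if any allocation fails.
 */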
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

        /* so that alloc_pd can use it */
        mm->pgd = pgd;
        if (pgd)
                pgd_ctor(pgd);

        if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
                pgd_dtor(pgd);
                free_page((unsigned long)pgd);
                pgd = NULL;
        }

        return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
        pgd_mop_up_pmds(mm, pgd);
        pgd_dtor(pgd);
        free_page((unsigned long)pgd);
}

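/*
 * Release a pte page on behalf of the mmu_gather: undo the
 * pgtable_page_ctor() from pte_alloc_one(), tell the paravirt layer
 * the page is no longer a pagetable, and queue it for freeing once
 * the TLB has been flushed.
 */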
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
        pgtable_page_dtor(pte);
        paravirt_release_pt(page_to_pfn(pte));
        tlb_remove_page(tlb, pte);
}

#ifdef CONFIG_X86_PAE

void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
        paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
        tlb_remove_page(tlb, virt_to_page(pmd));
}

#endif

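/*
 * Cross-check the two pmd_bad() variants: warn once if pmd_bad_v1()
 * and pmd_bad_v2() ever disagree, while callers keep getting the v1
 * result.
 */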
int pmd_bad(pmd_t pmd)
{
        WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd));

        return pmd_bad_v1(pmd);
}