/*
 * linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <linux/quicklist.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>

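/*
 * Dump a summary of memory usage to the kernel log: the free areas,
 * then a per-page census of every online node, taken under the pgdat
 * resize lock.  The walk over node_spanned_pages can be long on big
 * machines, hence the periodic touch_nmi_watchdog() call.
 */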
void show_mem(void)
{
	int total = 0, reserved = 0;
	int shared = 0, cached = 0;
	int highmem = 0;
	struct page *page;
	pg_data_t *pgdat;
	unsigned long i;
	unsigned long flags;

	printk(KERN_INFO "Mem-info:\n");
	show_free_areas();
	for_each_online_pgdat(pgdat) {
		pgdat_resize_lock(pgdat, &flags);
		for (i = 0; i < pgdat->node_spanned_pages; ++i) {
			if (unlikely(i % MAX_ORDER_NR_PAGES == 0))
				touch_nmi_watchdog();
			page = pgdat_page_nr(pgdat, i);
			total++;
			if (PageHighMem(page))
				highmem++;
			if (PageReserved(page))
				reserved++;
			else if (PageSwapCache(page))
				cached++;
			else if (page_count(page))
				shared += page_count(page) - 1;
		}
		pgdat_resize_unlock(pgdat, &flags);
	}
	printk(KERN_INFO "%d pages of RAM\n", total);
	printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
	printk(KERN_INFO "%d reserved pages\n", reserved);
	printk(KERN_INFO "%d pages shared\n", shared);
	printk(KERN_INFO "%d pages swap cached\n", cached);

	printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
	printk(KERN_INFO "%lu pages writeback\n",
		global_page_state(NR_WRITEBACK));
	printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
	printk(KERN_INFO "%lu pages slab\n",
		global_page_state(NR_SLAB_RECLAIMABLE) +
		global_page_state(NR_SLAB_UNRECLAIMABLE));
	printk(KERN_INFO "%lu pages pagetables\n",
		global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;

	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		BUG();
		return;
	}
	pud = pud_offset(pgd, vaddr);
	if (pud_none(*pud)) {
		BUG();
		return;
	}
	pmd = pmd_offset(pud, vaddr);
	if (pmd_none(*pmd)) {
		BUG();
		return;
	}
	pte = pte_offset_kernel(pmd, vaddr);
	if (pgprot_val(flags))
		set_pte_present(&init_mm, vaddr, pte, pfn_pte(pfn, flags));
	else
		pte_clear(&init_mm, vaddr, pte);

	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
	pgd_t *pgd;
	pud_t *pud;
	pmd_t *pmd;

	if (vaddr & (PMD_SIZE-1)) {		/* vaddr is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
		return; /* BUG(); */
	}
	if (pfn & (PTRS_PER_PTE-1)) {		/* pfn is misaligned */
		printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
		return; /* BUG(); */
	}
	pgd = swapper_pg_dir + pgd_index(vaddr);
	if (pgd_none(*pgd)) {
		printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
		return; /* BUG(); */
	}
	pud = pud_offset(pgd, vaddr);
	pmd = pmd_offset(pud, vaddr);
	set_pmd(pmd, pfn_pmd(pfn, flags));
	/*
	 * It's enough to flush this one mapping.
	 * (PGE mappings get flushed as well)
	 */
	__flush_tlb_one(vaddr);
}

static int fixmaps;
unsigned long __FIXADDR_TOP = 0xfffff000;
EXPORT_SYMBOL(__FIXADDR_TOP);

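/*
 * Install (or clear, when pgprot is zero) a single fixmap entry.  The
 * fixmaps counter lets reserve_top_address() refuse to relocate the
 * fixmap area once any entry has been set.
 */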
void __set_fixmap(enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
	unsigned long address = __fix_to_virt(idx);

	if (idx >= __end_of_fixed_addresses) {
		BUG();
		return;
	}
	set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
	fixmaps++;
}

/**
 * reserve_top_address - reserves a hole in the top of kernel address space
 * @reserve: size of hole to reserve
 *
 * Can be used to relocate the fixmap area and poke a hole in the top
 * of kernel address space to make room for a hypervisor.
 */
void reserve_top_address(unsigned long reserve)
{
	BUG_ON(fixmaps > 0);
	printk(KERN_INFO "Reserving virtual address space above 0x%08x\n",
	       (int)-reserve);
	__FIXADDR_TOP = -reserve - PAGE_SIZE;
	__VMALLOC_RESERVE += reserve;
}

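/*
 * Kernel pte pages are accessed through pte_offset_kernel(), so they
 * must live in lowmem; a zeroed GFP_KERNEL page suffices.
 */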
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

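/*
 * User pte pages may come from highmem when CONFIG_HIGHPTE is enabled,
 * since they are only ever touched through temporary kernel mappings.
 * pgtable_page_ctor() prepares the struct page for pagetable duty
 * (split ptlock initialization and pagetable accounting).
 */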
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	struct page *pte;

#ifdef CONFIG_HIGHPTE
	pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
	pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
	if (pte)
		pgtable_page_ctor(pte);
	return pte;
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * -- wli
 */
static inline void pgd_list_add(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_add(&page->lru, &pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
	struct page *page = virt_to_page(pgd);

	list_del(&page->lru);
}

#define UNSHARED_PTRS_PER_PGD \
	(SHARED_KERNEL_PMD ? USER_PTRS_PER_PGD : PTRS_PER_PGD)

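/*
 * Initialize a freshly allocated pgd: clear the user half, copy (or
 * alias) the kernel half from swapper_pg_dir and, when the kernel pmd
 * is not shared, put the pgd on pgd_list so that kernel mapping
 * updates can be propagated to it.
 */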
static void pgd_ctor(void *p)
{
	pgd_t *pgd = p;
	unsigned long flags;

	/* Clear usermode parts of PGD */
	memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));

	spin_lock_irqsave(&pgd_lock, flags);

	/* If the pgd points to a shared pagetable level (either the
	   ptes in non-PAE, or shared PMD in PAE), then just copy the
	   references from swapper_pg_dir. */
	if (PAGETABLE_LEVELS == 2 ||
	    (PAGETABLE_LEVELS == 3 && SHARED_KERNEL_PMD)) {
		clone_pgd_range(pgd + USER_PTRS_PER_PGD,
				swapper_pg_dir + USER_PTRS_PER_PGD,
				KERNEL_PGD_PTRS);
		paravirt_alloc_pd_clone(__pa(pgd) >> PAGE_SHIFT,
					__pa(swapper_pg_dir) >> PAGE_SHIFT,
					USER_PTRS_PER_PGD,
					KERNEL_PGD_PTRS);
	}

	/* list required to sync kernel mapping updates */
	if (!SHARED_KERNEL_PMD)
		pgd_list_add(pgd);

	spin_unlock_irqrestore(&pgd_lock, flags);
}

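/*
 * Undo pgd_ctor: with an unshared kernel pmd the pgd must be taken
 * back off pgd_list before its page is freed.
 */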
static void pgd_dtor(void *pgd)
{
	unsigned long flags; /* can be called from interrupt context */

	if (SHARED_KERNEL_PMD)
		return;

	spin_lock_irqsave(&pgd_lock, flags);
	pgd_list_del(pgd);
	spin_unlock_irqrestore(&pgd_lock, flags);
}

#ifdef CONFIG_X86_PAE
/*
 * Mop up any pmd pages which may still be attached to the pgd.
 * Normally they will be freed by munmap/exit_mmap, but any pmd we
 * preallocate which never got a corresponding vma will need to be
 * freed manually.
 */
static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
	int i;

	for (i = 0; i < UNSHARED_PTRS_PER_PGD; i++) {
		pgd_t pgd = pgdp[i];

		if (pgd_val(pgd) != 0) {
			pmd_t *pmd = (pmd_t *)pgd_page_vaddr(pgd);

			pgdp[i] = native_make_pgd(0);

			paravirt_release_pd(pgd_val(pgd) >> PAGE_SHIFT);
			pmd_free(mm, pmd);
		}
	}
}

/*
 * In PAE mode, we need to do a cr3 reload (=tlb flush) when
 * updating the top-level pagetable entries to guarantee the
 * processor notices the update. Since this is expensive, and
 * all 4 top-level entries are used almost immediately in a
 * new process's life, we just pre-populate them here.
 *
 * Also, if we're in a paravirt environment where the kernel pmd is
 * not shared between pagetables (!SHARED_KERNEL_PMD), we allocate
 * and initialize the kernel pmds here.
 */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	pud_t *pud;
	unsigned long addr;
	int i;

	pud = pud_offset(pgd, 0);
	for (addr = i = 0; i < UNSHARED_PTRS_PER_PGD;
	     i++, pud++, addr += PUD_SIZE) {
		pmd_t *pmd = pmd_alloc_one(mm, addr);

		if (!pmd) {
			pgd_mop_up_pmds(mm, pgd);
			return 0;
		}

		if (i >= USER_PTRS_PER_PGD)
			memcpy(pmd, (pmd_t *)pgd_page_vaddr(swapper_pg_dir[i]),
			       sizeof(pmd_t) * PTRS_PER_PMD);

		pud_populate(mm, pud, pmd);
	}

	return 1;
}
#else  /* !CONFIG_X86_PAE */
/* No need to prepopulate any pagetable entries in non-PAE modes. */
static int pgd_prepopulate_pmd(struct mm_struct *mm, pgd_t *pgd)
{
	return 1;
}

static void pgd_mop_up_pmds(struct mm_struct *mm, pgd_t *pgdp)
{
}
#endif	/* CONFIG_X86_PAE */

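/*
 * Allocate and initialize a new pgd.  mm->pgd is set before the ctor
 * runs so that the pagetable allocation hooks can see which mm the pgd
 * belongs to; if pmd prepopulation fails, everything is torn back down
 * and NULL is returned.
 */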
pgd_t *pgd_alloc(struct mm_struct *mm)
{
	pgd_t *pgd = (pgd_t *)__get_free_page(GFP_KERNEL | __GFP_ZERO);

	/* so that alloc_pd can use it */
	mm->pgd = pgd;
	if (pgd)
		pgd_ctor(pgd);

	if (pgd && !pgd_prepopulate_pmd(mm, pgd)) {
		pgd_dtor(pgd);
		free_page((unsigned long)pgd);
		pgd = NULL;
	}

	return pgd;
}

void pgd_free(struct mm_struct *mm, pgd_t *pgd)
{
	pgd_mop_up_pmds(mm, pgd);
	pgd_dtor(pgd);
	free_page((unsigned long)pgd);
}

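/*
 * Free a user pte page via the mmu_gather: undo pgtable_page_ctor(),
 * tell any paravirt backend the page is no longer a pagetable, then
 * queue it for freeing once the TLBs have been flushed.
 */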
void __pte_free_tlb(struct mmu_gather *tlb, struct page *pte)
{
	pgtable_page_dtor(pte);
	paravirt_release_pt(page_to_pfn(pte));
	tlb_remove_page(tlb, pte);
}

#ifdef CONFIG_X86_PAE

void __pmd_free_tlb(struct mmu_gather *tlb, pmd_t *pmd)
{
	paravirt_release_pd(__pa(pmd) >> PAGE_SHIFT);
	tlb_remove_page(tlb, virt_to_page(pmd));
}

#endif

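/*
 * Transitional debug check: warn (once) if the old and new pmd_bad()
 * implementations ever disagree, while still trusting the v1 result.
 */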
int pmd_bad(pmd_t pmd)
{
	WARN_ON_ONCE(pmd_bad_v1(pmd) != pmd_bad_v2(pmd));

	return pmd_bad_v1(pmd);
}