/*
 * linux/arch/i386/mm/pgtable.c
 */

#include <linux/sched.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/swap.h>
#include <linux/smp.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/pagemap.h>
#include <linux/spinlock.h>

#include <asm/system.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/fixmap.h>
#include <asm/e820.h>
#include <asm/tlb.h>
#include <asm/tlbflush.h>
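/*
 * Dump a summary of memory usage to the kernel log.  Each online
 * node's spanned pages are walked under that node's resize lock, so
 * the scan is stable against concurrent memory hotplug; a page with
 * N users contributes N-1 to the "shared" count.
 */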
void show_mem(void)
{
        int total = 0, reserved = 0;
        int shared = 0, cached = 0;
        int highmem = 0;
        struct page *page;
        pg_data_t *pgdat;
        unsigned long i;
        unsigned long flags;

        printk(KERN_INFO "Mem-info:\n");
        show_free_areas();
        printk(KERN_INFO "Free swap: %6ldkB\n", nr_swap_pages<<(PAGE_SHIFT-10));
        for_each_online_pgdat(pgdat) {
                pgdat_resize_lock(pgdat, &flags);
                for (i = 0; i < pgdat->node_spanned_pages; ++i) {
                        page = pgdat_page_nr(pgdat, i);
                        total++;
                        if (PageHighMem(page))
                                highmem++;
                        if (PageReserved(page))
                                reserved++;
                        else if (PageSwapCache(page))
                                cached++;
                        else if (page_count(page))
                                shared += page_count(page) - 1;
                }
                pgdat_resize_unlock(pgdat, &flags);
        }
        printk(KERN_INFO "%d pages of RAM\n", total);
        printk(KERN_INFO "%d pages of HIGHMEM\n", highmem);
        printk(KERN_INFO "%d reserved pages\n", reserved);
        printk(KERN_INFO "%d pages shared\n", shared);
        printk(KERN_INFO "%d pages swap cached\n", cached);

        printk(KERN_INFO "%lu pages dirty\n", global_page_state(NR_FILE_DIRTY));
        printk(KERN_INFO "%lu pages writeback\n",
               global_page_state(NR_WRITEBACK));
        printk(KERN_INFO "%lu pages mapped\n", global_page_state(NR_FILE_MAPPED));
        printk(KERN_INFO "%lu pages slab\n",
               global_page_state(NR_SLAB_RECLAIMABLE) +
               global_page_state(NR_SLAB_UNRECLAIMABLE));
        printk(KERN_INFO "%lu pages pagetables\n",
               global_page_state(NR_PAGETABLE));
}

/*
 * Associate a virtual page frame with a given physical page frame
 * and protection flags for that frame.
 */
static void set_pte_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;
        pte_t *pte;

        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                BUG();
                return;
        }
        pud = pud_offset(pgd, vaddr);
        if (pud_none(*pud)) {
                BUG();
                return;
        }
        pmd = pmd_offset(pud, vaddr);
        if (pmd_none(*pmd)) {
                BUG();
                return;
        }
        pte = pte_offset_kernel(pmd, vaddr);
        /* <pfn,flags> stored as-is, to permit clearing entries */
        set_pte(pte, pfn_pte(pfn, flags));

        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

/*
 * Associate a large virtual page frame with a given physical page frame
 * and protection flags for that frame. pfn is for the base of the page,
 * vaddr is what the page gets mapped to - both must be properly aligned.
 * The pmd must already be instantiated. Assumes PAE mode.
 */
void set_pmd_pfn(unsigned long vaddr, unsigned long pfn, pgprot_t flags)
{
        pgd_t *pgd;
        pud_t *pud;
        pmd_t *pmd;

        if (vaddr & (PMD_SIZE-1)) {             /* vaddr is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: vaddr misaligned\n");
                return; /* BUG(); */
        }
        if (pfn & (PTRS_PER_PTE-1)) {           /* pfn is misaligned */
                printk(KERN_WARNING "set_pmd_pfn: pfn misaligned\n");
                return; /* BUG(); */
        }
        pgd = swapper_pg_dir + pgd_index(vaddr);
        if (pgd_none(*pgd)) {
                printk(KERN_WARNING "set_pmd_pfn: pgd_none\n");
                return; /* BUG(); */
        }
        pud = pud_offset(pgd, vaddr);
        pmd = pmd_offset(pud, vaddr);
        set_pmd(pmd, pfn_pmd(pfn, flags));
        /*
         * It's enough to flush this one mapping.
         * (PGE mappings get flushed as well)
         */
        __flush_tlb_one(vaddr);
}

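/*
 * Install a fixmap entry: convert the fixmap index to its fixed
 * virtual address and point that address at the given physical frame.
 */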
void __set_fixmap (enum fixed_addresses idx, unsigned long phys, pgprot_t flags)
{
        unsigned long address = __fix_to_virt(idx);

        if (idx >= __end_of_fixed_addresses) {
                BUG();
                return;
        }
        set_pte_pfn(address, phys >> PAGE_SHIFT, flags);
}

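/*
 * Page table page allocation.  Kernel page tables must live in the
 * direct mapping, so pte_alloc_one_kernel() always returns lowmem.
 * User page tables are only accessed through kmap_atomic(), so with
 * CONFIG_HIGHPTE they may be placed in highmem.
 */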
pte_t *pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
        return (pte_t *)__get_free_page(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO);
}

struct page *pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
        struct page *pte;

#ifdef CONFIG_HIGHPTE
        pte = alloc_pages(GFP_KERNEL|__GFP_HIGHMEM|__GFP_REPEAT|__GFP_ZERO, 0);
#else
        pte = alloc_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, 0);
#endif
        return pte;
}

void pmd_ctor(void *pmd, kmem_cache_t *cache, unsigned long flags)
{
        memset(pmd, 0, PTRS_PER_PMD*sizeof(pmd_t));
}

/*
 * List of all pgd's needed for non-PAE so it can invalidate entries
 * in both cached and uncached pgd's; not needed for PAE since the
 * kernel pmd is shared. If PAE were not to share the pmd a similar
 * tactic would be needed. This is essentially codepath-based locking
 * against pageattr.c; it is the unique case in which a valid change
 * of kernel pagetables can't be lazily synchronized by vmalloc faults.
 * vmalloc faults work because attached pagetables are never freed.
 * The locking scheme was chosen on the basis of manfred's
 * recommendations and having no core impact whatsoever.
 * -- wli
 */
DEFINE_SPINLOCK(pgd_lock);
struct page *pgd_list;

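/*
 * The pgd list is threaded through each pgd's struct page: page->index
 * holds the next element, and page_private() points back at the
 * previous element's next link (or at pgd_list itself for the head),
 * which makes pgd_list_del() O(1).
 */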
static inline void pgd_list_add(pgd_t *pgd)
{
        struct page *page = virt_to_page(pgd);
        page->index = (unsigned long)pgd_list;
        if (pgd_list)
                set_page_private(pgd_list, (unsigned long)&page->index);
        pgd_list = page;
        set_page_private(page, (unsigned long)&pgd_list);
}

static inline void pgd_list_del(pgd_t *pgd)
{
        struct page *next, **pprev, *page = virt_to_page(pgd);
        next = (struct page *)page->index;
        pprev = (struct page **)page_private(page);
        *pprev = next;
        if (next)
                set_page_private(next, (unsigned long)pprev);
}

void pgd_ctor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags;

        if (PTRS_PER_PMD == 1) {
                memset(pgd, 0, USER_PTRS_PER_PGD*sizeof(pgd_t));
                spin_lock_irqsave(&pgd_lock, flags);
        }

        clone_pgd_range((pgd_t *)pgd + USER_PTRS_PER_PGD,
                        swapper_pg_dir + USER_PTRS_PER_PGD,
                        KERNEL_PGD_PTRS);
        if (PTRS_PER_PMD > 1)
                return;

        pgd_list_add(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

/* never called when PTRS_PER_PMD > 1 */
void pgd_dtor(void *pgd, kmem_cache_t *cache, unsigned long unused)
{
        unsigned long flags; /* can be called from interrupt context */

        spin_lock_irqsave(&pgd_lock, flags);
        pgd_list_del(pgd);
        spin_unlock_irqrestore(&pgd_lock, flags);
}

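/*
 * With PAE (PTRS_PER_PMD > 1) the user portion of the pgd cannot be
 * shared, so a pmd page is preallocated for every user pgd slot and
 * its physical address is stored with the low (present) bit set:
 * hence the "1 +" below and the matching "- 1" in the free paths.
 */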
pgd_t *pgd_alloc(struct mm_struct *mm)
{
        int i;
        pgd_t *pgd = kmem_cache_alloc(pgd_cache, GFP_KERNEL);

        if (PTRS_PER_PMD == 1 || !pgd)
                return pgd;

        for (i = 0; i < USER_PTRS_PER_PGD; ++i) {
                pmd_t *pmd = kmem_cache_alloc(pmd_cache, GFP_KERNEL);
                if (!pmd)
                        goto out_oom;
                set_pgd(&pgd[i], __pgd(1 + __pa(pmd)));
        }
        return pgd;

out_oom:
        for (i--; i >= 0; i--)
                kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        kmem_cache_free(pgd_cache, pgd);
        return NULL;
}

void pgd_free(pgd_t *pgd)
{
        int i;

        /* in the PAE case user pgd entries are overwritten before usage */
        if (PTRS_PER_PMD > 1)
                for (i = 0; i < USER_PTRS_PER_PGD; ++i)
                        kmem_cache_free(pmd_cache, (void *)__va(pgd_val(pgd[i])-1));
        /* in the non-PAE case, free_pgtables() clears user pgd entries */
        kmem_cache_free(pgd_cache, pgd);
}